aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2024-06-22 00:40:59 +0200
committerGitHub <noreply@github.com>2024-06-22 00:40:59 +0200
commit73a2232720898acfee26588520f795b2d97d0000 (patch)
treeeea8d44ceb17b827493f1e97571cc669515f7806
parent9b78ddf3b2abfb3e2063e3dad2a326f5eabc1618 (diff)
downloadllvm-73a2232720898acfee26588520f795b2d97d0000.zip
llvm-73a2232720898acfee26588520f795b2d97d0000.tar.gz
llvm-73a2232720898acfee26588520f795b2d97d0000.tar.bz2
AMDGPU: Materialize bitwise not of inline immediates (#95960)
If we have a bitwise negated inline immediate, we can materialize it with s_not_b32/v_not_b32. This mirrors the current bitreverse handling. As a side effect, we also now handle the bitreversed FP immediate case. One test shows some VOPD regressions on gfx11 which should probably be fixed. Previously the 2 v_mov_b32 could be packed, but now the mismatched opcode + mov can't. This problem already existed for the bfrev case; it just happens more often now.
-rw-r--r--llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp58
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll24
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/sub.v2i16.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll211
-rw-r--r--llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll17
-rw-r--r--llvm/test/CodeGen/AMDGPU/med3-knownbits.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/permute_i8.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/shrink-insts-scalar-bit-ops.mir22
-rw-r--r--llvm/test/CodeGen/AMDGPU/udiv64.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/urem64.ll2
16 files changed, 310 insertions, 92 deletions
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 647fae9..79bcf5e 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -45,7 +45,6 @@ public:
bool isKImmOperand(const MachineOperand &Src) const;
bool isKUImmOperand(const MachineOperand &Src) const;
bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
- bool isReverseInlineImm(const MachineOperand &Src, int32_t &ReverseImm) const;
void copyExtraImplicitOps(MachineInstr &NewMI, MachineInstr &MI) const;
void shrinkScalarCompare(MachineInstr &MI) const;
void shrinkMIMG(MachineInstr &MI) const;
@@ -183,15 +182,36 @@ bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
return false;
}
-/// \returns true if the constant in \p Src should be replaced with a bitreverse
-/// of an inline immediate.
-bool SIShrinkInstructions::isReverseInlineImm(const MachineOperand &Src,
- int32_t &ReverseImm) const {
- if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
- return false;
+/// \returns the opcode of an instruction a move immediate of the constant \p
+/// Src can be replaced with if the constant is replaced with \p ModifiedImm.
+/// i.e.
+///
+/// If the bitreverse of a constant is an inline immediate, reverse the
+/// immediate and return the bitreverse opcode.
+///
+/// If the bitwise negation of a constant is an inline immediate, reverse the
+/// immediate and return the bitwise not opcode.
+static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
+ const MachineOperand &Src,
+ int32_t &ModifiedImm, bool Scalar) {
+ if (TII->isInlineConstant(Src))
+ return 0;
+ int32_t SrcImm = static_cast<int32_t>(Src.getImm());
+
+ if (!Scalar) {
+ // We could handle the scalar case with here, but we would need to check
+ // that SCC is not live as S_NOT_B32 clobbers it. It's probably not worth
+ // it, as the reasonable values are already covered by s_movk_i32.
+ ModifiedImm = ~SrcImm;
+ if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+ return AMDGPU::V_NOT_B32_e32;
+ }
+
+ ModifiedImm = reverseBits<int32_t>(SrcImm);
+ if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+ return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;
- ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
- return ReverseImm >= -16 && ReverseImm <= 64;
+ return 0;
}
/// Copy implicit register operands from specified instruction to this
@@ -801,10 +821,12 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// XXX - not exactly a check for post-regalloc run.
MachineOperand &Src = MI.getOperand(1);
if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
- int32_t ReverseImm;
- if (isReverseInlineImm(Src, ReverseImm)) {
- MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
- Src.setImm(ReverseImm);
+ int32_t ModImm;
+ unsigned ModOpcode =
+ canModifyToInlineImmOp32(TII, Src, ModImm, /*Scalar=*/false);
+ if (ModOpcode != 0) {
+ MI.setDesc(TII->get(ModOpcode));
+ Src.setImm(static_cast<int64_t>(ModImm));
continue;
}
}
@@ -863,13 +885,15 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
MachineOperand &Src = MI.getOperand(1);
if (Src.isImm() && Dst.getReg().isPhysical()) {
- int32_t ReverseImm;
+ unsigned ModOpc;
+ int32_t ModImm;
if (isKImmOperand(Src)) {
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
Src.setImm(SignExtend64(Src.getImm(), 32));
- } else if (isReverseInlineImm(Src, ReverseImm)) {
- MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
- Src.setImm(ReverseImm);
+ } else if ((ModOpc = canModifyToInlineImmOp32(TII, Src, ModImm,
+ /*Scalar=*/true))) {
+ MI.setDesc(TII->get(ModOpc));
+ Src.setImm(static_cast<int64_t>(ModImm));
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll
index 496ee9f..c8b8271 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll
@@ -178,7 +178,7 @@ define <2 x i16> @v_add_v2i16_neg_inline_imm_splat(<2 x i16> %a) {
; GFX8-LABEL: v_add_v2i16_neg_inline_imm_splat:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffc0
+; GFX8-NEXT: v_not_b32_e32 v1, 63
; GFX8-NEXT: v_add_u16_e32 v2, 0xffc0, v0
; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
@@ -244,7 +244,7 @@ define <2 x i16> @v_add_v2i16_neg_inline_imm_hi(<2 x i16> %a) {
; GFX8-LABEL: v_add_v2i16_neg_inline_imm_hi:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffc0
+; GFX8-NEXT: v_not_b32_e32 v1, 63
; GFX8-NEXT: v_add_u16_e32 v2, 4, v0
; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index 4df5fa1..afffebe 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -1486,7 +1486,7 @@ define amdgpu_ps i24 @s_fshl_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt)
; GFX6: ; %bb.0:
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v1, 23
; GFX6-NEXT: s_and_b32 s2, s2, 0xffffff
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x170001
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
@@ -1516,7 +1516,7 @@ define amdgpu_ps i24 @s_fshl_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt)
; GFX8: ; %bb.0:
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v1, 23
; GFX8-NEXT: s_and_b32 s2, s2, 0xffffff
; GFX8-NEXT: s_bfe_u32 s1, s1, 0x170001
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
@@ -1546,7 +1546,7 @@ define amdgpu_ps i24 @s_fshl_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt)
; GFX9: ; %bb.0:
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v1, 23
; GFX9-NEXT: s_and_b32 s2, s2, 0xffffff
; GFX9-NEXT: s_bfe_u32 s1, s1, 0x170001
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
@@ -1646,7 +1646,7 @@ define i24 @v_fshl_i24(i24 %lhs, i24 %rhs, i24 %amt) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GFX6-NEXT: v_mov_b32_e32 v4, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v4, 23
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX6-NEXT: v_bfe_u32 v1, v1, 1, 23
; GFX6-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
@@ -1676,7 +1676,7 @@ define i24 @v_fshl_i24(i24 %lhs, i24 %rhs, i24 %amt) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
; GFX8-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GFX8-NEXT: v_mov_b32_e32 v4, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v4, 23
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX8-NEXT: v_bfe_u32 v1, v1, 1, 23
; GFX8-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
@@ -1706,7 +1706,7 @@ define i24 @v_fshl_i24(i24 %lhs, i24 %rhs, i24 %amt) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GFX9-NEXT: v_mov_b32_e32 v4, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v4, 23
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX9-NEXT: v_bfe_u32 v1, v1, 1, 23
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
@@ -1822,7 +1822,7 @@ define amdgpu_ps i48 @s_fshl_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 i
; GFX6-NEXT: s_lshl_b32 s6, s6, 16
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: s_lshl_b32 s0, s0, 16
-; GFX6-NEXT: v_mov_b32_e32 v3, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v3, 23
; GFX6-NEXT: s_or_b32 s6, s8, s6
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
; GFX6-NEXT: s_lshr_b32 s0, s2, 16
@@ -1959,7 +1959,7 @@ define amdgpu_ps i48 @s_fshl_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 i
; GFX8-NEXT: s_or_b32 s2, s2, s6
; GFX8-NEXT: s_lshl_b32 s3, s3, 8
; GFX8-NEXT: s_and_b32 s6, s9, 0xff
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v1, 23
; GFX8-NEXT: s_or_b32 s3, s8, s3
; GFX8-NEXT: s_and_b32 s6, 0xffff, s6
; GFX8-NEXT: v_mul_lo_u32 v1, v0, v1
@@ -2079,7 +2079,7 @@ define amdgpu_ps i48 @s_fshl_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 i
; GFX9-NEXT: s_or_b32 s2, s2, s6
; GFX9-NEXT: s_lshl_b32 s3, s3, 8
; GFX9-NEXT: s_and_b32 s6, s9, 0xff
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v1, 23
; GFX9-NEXT: s_or_b32 s3, s8, s3
; GFX9-NEXT: s_and_b32 s6, 0xffff, s6
; GFX9-NEXT: v_mul_lo_u32 v1, v0, v1
@@ -2414,7 +2414,7 @@ define <2 x i24> @v_fshl_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v6
-; GFX6-NEXT: v_mov_b32_e32 v7, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v7, 23
; GFX6-NEXT: v_and_b32_e32 v4, 0xffffff, v4
; GFX6-NEXT: v_and_b32_e32 v5, 0xffffff, v5
; GFX6-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
@@ -2461,7 +2461,7 @@ define <2 x i24> @v_fshl_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
; GFX8-NEXT: v_rcp_iflag_f32_e32 v6, v6
-; GFX8-NEXT: v_mov_b32_e32 v7, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v7, 23
; GFX8-NEXT: v_and_b32_e32 v4, 0xffffff, v4
; GFX8-NEXT: v_and_b32_e32 v5, 0xffffff, v5
; GFX8-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
@@ -2508,7 +2508,7 @@ define <2 x i24> @v_fshl_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v6
-; GFX9-NEXT: v_mov_b32_e32 v7, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v7, 23
; GFX9-NEXT: v_and_b32_e32 v4, 0xffffff, v4
; GFX9-NEXT: v_and_b32_e32 v5, 0xffffff, v5
; GFX9-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index 61588e6..8538dca 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -1487,7 +1487,7 @@ define amdgpu_ps i24 @s_fshr_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt)
; GFX6: ; %bb.0:
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v1, 23
; GFX6-NEXT: s_and_b32 s2, s2, 0xffffff
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
@@ -1518,7 +1518,7 @@ define amdgpu_ps i24 @s_fshr_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt)
; GFX8: ; %bb.0:
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v1, 23
; GFX8-NEXT: s_and_b32 s2, s2, 0xffffff
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
@@ -1549,7 +1549,7 @@ define amdgpu_ps i24 @s_fshr_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt)
; GFX9: ; %bb.0:
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v1, 23
; GFX9-NEXT: s_and_b32 s2, s2, 0xffffff
; GFX9-NEXT: s_and_b32 s1, s1, 0xffffff
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
@@ -1652,7 +1652,7 @@ define i24 @v_fshr_i24(i24 %lhs, i24 %rhs, i24 %amt) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GFX6-NEXT: v_mov_b32_e32 v4, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v4, 23
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
@@ -1683,7 +1683,7 @@ define i24 @v_fshr_i24(i24 %lhs, i24 %rhs, i24 %amt) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
; GFX8-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GFX8-NEXT: v_mov_b32_e32 v4, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v4, 23
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX8-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
@@ -1714,7 +1714,7 @@ define i24 @v_fshr_i24(i24 %lhs, i24 %rhs, i24 %amt) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
-; GFX9-NEXT: v_mov_b32_e32 v4, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v4, 23
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffffff, v1
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
@@ -1820,7 +1820,7 @@ define amdgpu_ps i48 @s_fshr_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 i
; GFX6-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2
; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v3, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v3, 23
; GFX6-NEXT: s_lshr_b32 s6, s0, 16
; GFX6-NEXT: s_and_b32 s8, s0, 0xff
; GFX6-NEXT: s_lshl_b32 s9, s9, 8
@@ -1962,7 +1962,7 @@ define amdgpu_ps i48 @s_fshr_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 i
; GFX8-NEXT: s_or_b32 s2, s2, s8
; GFX8-NEXT: s_lshl_b32 s3, s3, 8
; GFX8-NEXT: s_and_b32 s8, s11, 0xff
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v1, 23
; GFX8-NEXT: s_or_b32 s3, s10, s3
; GFX8-NEXT: s_and_b32 s8, 0xffff, s8
; GFX8-NEXT: v_mul_lo_u32 v1, v0, v1
@@ -2082,7 +2082,7 @@ define amdgpu_ps i48 @s_fshr_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 i
; GFX9-NEXT: s_or_b32 s2, s2, s8
; GFX9-NEXT: s_lshl_b32 s3, s3, 8
; GFX9-NEXT: s_and_b32 s8, s11, 0xff
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v1, 23
; GFX9-NEXT: s_or_b32 s3, s10, s3
; GFX9-NEXT: s_and_b32 s8, 0xffff, s8
; GFX9-NEXT: v_mul_lo_u32 v1, v0, v1
@@ -2424,7 +2424,7 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v6
-; GFX6-NEXT: v_mov_b32_e32 v7, 0xffffffe8
+; GFX6-NEXT: v_not_b32_e32 v7, 23
; GFX6-NEXT: v_and_b32_e32 v4, 0xffffff, v4
; GFX6-NEXT: v_and_b32_e32 v5, 0xffffff, v5
; GFX6-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
@@ -2473,7 +2473,7 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
; GFX8-NEXT: v_rcp_iflag_f32_e32 v6, v6
-; GFX8-NEXT: v_mov_b32_e32 v7, 0xffffffe8
+; GFX8-NEXT: v_not_b32_e32 v7, 23
; GFX8-NEXT: v_and_b32_e32 v4, 0xffffff, v4
; GFX8-NEXT: v_and_b32_e32 v5, 0xffffff, v5
; GFX8-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
@@ -2522,7 +2522,7 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v6
-; GFX9-NEXT: v_mov_b32_e32 v7, 0xffffffe8
+; GFX9-NEXT: v_not_b32_e32 v7, 23
; GFX9-NEXT: v_and_b32_e32 v4, 0xffffff, v4
; GFX9-NEXT: v_and_b32_e32 v5, 0xffffff, v5
; GFX9-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
index 6e96a4d..546376c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.intersect_ray.ll
@@ -865,7 +865,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4
; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX1030-NEXT: flat_load_dword v2, v[0:1]
; GFX1030-NEXT: v_mov_b32_e32 v0, 0xb36211c7
-; GFX1030-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1030-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1030-NEXT: image_bvh64_intersect_ray v[0:3], v[0:11], s[0:3]
; GFX1030-NEXT: s_waitcnt vmcnt(0)
@@ -894,7 +894,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4
; GFX1013-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX1013-NEXT: flat_load_dword v2, v[0:1]
; GFX1013-NEXT: v_mov_b32_e32 v0, 0xb36211c7
-; GFX1013-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1013-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1013-NEXT: image_bvh64_intersect_ray v[0:3], v[0:11], s[4:7]
; GFX1013-NEXT: s_waitcnt vmcnt(0)
@@ -973,7 +973,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
; GFX1030-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX1030-NEXT: flat_load_dword v2, v[0:1]
; GFX1030-NEXT: v_mov_b32_e32 v0, 0xb36211c6
-; GFX1030-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1030-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1030-NEXT: image_bvh64_intersect_ray v[0:3], v[0:8], s[0:3] a16
; GFX1030-NEXT: s_waitcnt vmcnt(0)
@@ -999,7 +999,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
; GFX1013-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX1013-NEXT: flat_load_dword v2, v[0:1]
; GFX1013-NEXT: v_mov_b32_e32 v0, 0xb36211c6
-; GFX1013-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1013-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1013-NEXT: image_bvh64_intersect_ray v[0:3], v[0:8], s[4:7] a16
; GFX1013-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.v2i16.ll
index 5613501..8556872 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.v2i16.ll
@@ -147,7 +147,7 @@ define <2 x i16> @v_sub_v2i16_neg_inline_imm_splat(<2 x i16> %a) {
; GFX8-LABEL: v_sub_v2i16_neg_inline_imm_splat:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffc0
+; GFX8-NEXT: v_not_b32_e32 v1, 63
; GFX8-NEXT: v_subrev_u16_e32 v2, 0xffc0, v0
; GFX8-NEXT: v_sub_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
@@ -211,7 +211,7 @@ define <2 x i16> @v_sub_v2i16_neg_inline_imm_hi(<2 x i16> %a) {
; GFX8-LABEL: v_sub_v2i16_neg_inline_imm_hi:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v1, 0xffffffc0
+; GFX8-NEXT: v_not_b32_e32 v1, 63
; GFX8-NEXT: v_subrev_u16_e32 v2, 4, v0
; GFX8-NEXT: v_sub_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll b/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
index f50b4a4..2b4bea1 100644
--- a/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitreverse-inline-immediates.ll
@@ -1,4 +1,6 @@
-; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
; Test that materialization constants that are the bit reversed of
; inline immediates are replaced with bfrev of the inline immediate to
@@ -6,7 +8,7 @@
; GCN-LABEL: {{^}}materialize_0_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_0_i32(ptr addrspace(1) %out) {
store i32 0, ptr addrspace(1) %out
ret void
@@ -15,7 +17,7 @@ define amdgpu_kernel void @materialize_0_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_0_i64:
; GCN: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], v[[LOK]]{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_0_i64(ptr addrspace(1) %out) {
store i64 0, ptr addrspace(1) %out
ret void
@@ -23,7 +25,7 @@ define amdgpu_kernel void @materialize_0_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_neg1_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], -1{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_neg1_i32(ptr addrspace(1) %out) {
store i32 -1, ptr addrspace(1) %out
ret void
@@ -32,7 +34,7 @@ define amdgpu_kernel void @materialize_neg1_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_neg1_i64:
; GCN: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
; GCN: v_mov_b32_e32 v[[HIK:[0-9]+]], v[[LOK]]{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_neg1_i64(ptr addrspace(1) %out) {
store i64 -1, ptr addrspace(1) %out
ret void
@@ -40,7 +42,7 @@ define amdgpu_kernel void @materialize_neg1_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_signbit_i32:
; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 1{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_signbit_i32(ptr addrspace(1) %out) {
store i32 -2147483648, ptr addrspace(1) %out
ret void
@@ -49,7 +51,7 @@ define amdgpu_kernel void @materialize_signbit_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_signbit_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HIK:[0-9]+]], 1{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_signbit_i64(ptr addrspace(1) %out) {
store i64 -9223372036854775808, ptr addrspace(1) %out
ret void
@@ -57,7 +59,7 @@ define amdgpu_kernel void @materialize_signbit_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_neg16_i32:
; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], -16{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_rev_neg16_i32(ptr addrspace(1) %out) {
store i32 268435455, ptr addrspace(1) %out
ret void
@@ -66,7 +68,7 @@ define amdgpu_kernel void @materialize_rev_neg16_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_neg16_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HIK:[0-9]+]], -16{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_rev_neg16_i64(ptr addrspace(1) %out) {
store i64 1152921504606846975, ptr addrspace(1) %out
ret void
@@ -74,7 +76,7 @@ define amdgpu_kernel void @materialize_rev_neg16_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_neg17_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0xf7ffffff{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_rev_neg17_i32(ptr addrspace(1) %out) {
store i32 -134217729, ptr addrspace(1) %out
ret void
@@ -83,7 +85,7 @@ define amdgpu_kernel void @materialize_rev_neg17_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_neg17_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], -1{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], 0xf7ffffff{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_rev_neg17_i64(ptr addrspace(1) %out) {
store i64 -576460752303423489, ptr addrspace(1) %out
ret void
@@ -91,7 +93,7 @@ define amdgpu_kernel void @materialize_rev_neg17_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_64_i32:
; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 64{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_rev_64_i32(ptr addrspace(1) %out) {
store i32 33554432, ptr addrspace(1) %out
ret void
@@ -100,7 +102,7 @@ define amdgpu_kernel void @materialize_rev_64_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_64_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_bfrev_b32_e32 v[[HIK:[0-9]+]], 64{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_rev_64_i64(ptr addrspace(1) %out) {
store i64 144115188075855872, ptr addrspace(1) %out
ret void
@@ -108,7 +110,7 @@ define amdgpu_kernel void @materialize_rev_64_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_65_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x82000000{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_rev_65_i32(ptr addrspace(1) %out) {
store i32 -2113929216, ptr addrspace(1) %out
ret void
@@ -117,7 +119,7 @@ define amdgpu_kernel void @materialize_rev_65_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_65_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], 0x82000000{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_rev_65_i64(ptr addrspace(1) %out) {
store i64 -9079256848778919936, ptr addrspace(1) %out
ret void
@@ -125,7 +127,7 @@ define amdgpu_kernel void @materialize_rev_65_i64(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_3_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], -2.0{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_rev_3_i32(ptr addrspace(1) %out) {
store i32 -1073741824, ptr addrspace(1) %out
ret void
@@ -134,24 +136,80 @@ define amdgpu_kernel void @materialize_rev_3_i32(ptr addrspace(1) %out) {
; GCN-LABEL: {{^}}materialize_rev_3_i64:
; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], -2.0{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_rev_3_i64(ptr addrspace(1) %out) {
store i64 -4611686018427387904, ptr addrspace(1) %out
ret void
}
+; GCN-LABEL: {{^}}materialize_rev_0.5_i32:
+; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 0.5{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_0.5_i32(ptr addrspace(1) %out) {
+ store i32 252, ptr addrspace(1) %out
+ ret void
+}
+
; GCN-LABEL: {{^}}materialize_rev_1.0_i32:
-; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x1fc{{$}}
-; GCN: buffer_store_dword [[K]]
+; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 1.0{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
define amdgpu_kernel void @materialize_rev_1.0_i32(ptr addrspace(1) %out) {
store i32 508, ptr addrspace(1) %out
ret void
}
+; GCN-LABEL: {{^}}materialize_rev_2.0_i32:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 2{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_2.0_i32(ptr addrspace(1) %out) {
+ store i32 2, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_rev_4.0_i32:
+; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], 4.0{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_4.0_i32(ptr addrspace(1) %out) {
+ store i32 258, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_rev_neg0.5_i32:
+; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], -0.5{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_neg0.5_i32(ptr addrspace(1) %out) {
+ store i32 253, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_rev_neg1.0_i32:
+; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], -1.0{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_neg1.0_i32(ptr addrspace(1) %out) {
+ store i32 509, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_rev_neg2.0_i32:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 3{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_neg2.0_i32(ptr addrspace(1) %out) {
+ store i32 3, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_rev_neg4.0_i32:
+; GCN: v_bfrev_b32_e32 [[K:v[0-9]+]], -4.0{{$}}
+; GCN: {{buffer|flat}}_store_dword {{.*}}[[K]]
+define amdgpu_kernel void @materialize_rev_neg4.0_i32(ptr addrspace(1) %out) {
+ store i32 259, ptr addrspace(1) %out
+ ret void
+}
+
; GCN-LABEL: {{^}}materialize_rev_1.0_i64:
-; GCN-DAG: v_mov_b32_e32 v[[LOK:[0-9]+]], 0x1fc{{$}}
+; GCN-DAG: v_bfrev_b32_e32 v[[LOK:[0-9]+]], 1.0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[HIK:[0-9]+]], 0{{$}}
-; GCN: buffer_store_dwordx2 v[[[LOK]]:[[HIK]]]
+; GCN: {{buffer|flat}}_store_dwordx2 {{.*}}v[[[LOK]]:[[HIK]]]
define amdgpu_kernel void @materialize_rev_1.0_i64(ptr addrspace(1) %out) {
store i64 508, ptr addrspace(1) %out
ret void
@@ -219,3 +277,114 @@ define amdgpu_kernel void @s_materialize_rev_1.0_i32() {
call void asm sideeffect "; use $0", "s"(i32 508)
ret void
}
+
+; GCN-LABEL: {{^}}s_materialize_not_1.0_i32:
+; GCN: s_mov_b32 s{{[0-9]+}}, 0xc07fffff
+define void @s_materialize_not_1.0_i32() {
+ call void asm sideeffect "; use $0", "s"(i32 -1065353217)
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_materialize_not_neg_1.0_i32:
+; GCN: s_mov_b32 s{{[0-9]+}}, 0x407fffff
+define void @s_materialize_not_neg_1.0_i32() {
+ call void asm sideeffect "; use $0", "s"(i32 1082130431)
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_materialize_not_inv2pi_i32:
+; GCN: s_mov_b32 s{{[0-9]+}}, 0xc1dd067c
+define void @s_materialize_not_inv2pi_i32() {
+ call void asm sideeffect "; use $0", "s"(i32 -1042479492)
+ ret void
+}
+
+; GCN-LABEL: {{^}}s_materialize_not_neg_inv2pi_i32:
+; GCN: s_mov_b32 s{{[0-9]+}}, 0x41dd067c
+define void @s_materialize_not_neg_inv2pi_i32() {
+ call void asm sideeffect "; use $0", "s"(i32 1105004156)
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_0.5_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, 0.5
+define void @materialize_not_0.5_i32(ptr addrspace(1) %out) {
+ store i32 -1056964609, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_1.0_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, 1.0
+define void @materialize_not_1.0_i32(ptr addrspace(1) %out) {
+ store i32 -1065353217, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_2.0_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, 2.0
+define void @materialize_not_2.0_i32(ptr addrspace(1) %out) {
+ store i32 -1073741825, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_4.0_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, 4.0
+define void @materialize_not_4.0_i32(ptr addrspace(1) %out) {
+ store i32 -1082130433, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_neg_0.5_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, -0.5
+define void @materialize_not_neg_0.5_i32(ptr addrspace(1) %out) {
+ store i32 1090519039, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_neg_1.0_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, -1.0
+define void @materialize_not_neg_1.0_i32(ptr addrspace(1) %out) {
+ store i32 1082130431, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_neg2.0_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, -2.0
+define void @materialize_not_neg2.0_i32(ptr addrspace(1) %out) {
+ store i32 1073741823, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_neg4.0_i32:
+; GCN: v_not_b32_e32 v{{[0-9]+}}, -4.0
+define void @materialize_not_neg4.0_i32(ptr addrspace(1) %out) {
+ store i32 1065353215, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_inv2pi_i32:
+; SI: v_mov_b32_e32 v{{[0-9]+}}, 0xc1dd067c
+; VI: v_not_b32_e32 v{{[0-9]+}}, 0.15915494
+define void @materialize_not_inv2pi_i32(ptr addrspace(1) %out) {
+ store i32 -1042479492, ptr addrspace(1) %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}materialize_not_neg_inv2pi_i32:
+; GCN: v_mov_b32_e32 v{{[0-9]+}}, 0x41dd067c
+define void @materialize_not_neg_inv2pi_i32(ptr addrspace(1) %out) {
+ store i32 1105004156, ptr addrspace(1) %out
+ ret void
+}
+
+; One constant is bit-reversible to an inline immediate, the other is not.
+; We shouldn't break VOPD packing for this.
+; GFX11-LABEL: {{^}}vopd_materialize:
+; FIXME-GFX11: v_dual_mov_b32 v0, 0x102 :: v_dual_mov_b32 v1, 1.0
+; GFX11: v_bfrev_b32_e32 v0, 4.0
+; GFX11: v_mov_b32_e32 v1, 1.0
+define <2 x i32> @vopd_materialize() {
+ %insert0 = insertelement <2 x i32> poison, i32 258, i32 0
+ %insert1 = insertelement <2 x i32> %insert0, i32 1065353216, i32 1
+ ret <2 x i32> %insert1
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
index 1d86fbc..8779bb0 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
@@ -4461,7 +4461,7 @@ define amdgpu_kernel void @elim_redun_check_v2(ptr addrspace(1) %out, <2 x float
; GISEL-IEEE-NEXT: v_mul_f32_e32 v3, 0x37800000, v1
; GISEL-IEEE-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GISEL-IEEE-NEXT: v_cmp_class_f32_e32 vcc, v0, v4
-; GISEL-IEEE-NEXT: v_mov_b32_e32 v3, 0x80000000
+; GISEL-IEEE-NEXT: v_bfrev_b32_e32 v3, 1
; GISEL-IEEE-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc
; GISEL-IEEE-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GISEL-IEEE-NEXT: v_cmp_lt_f32_e32 vcc, s6, v3
@@ -4557,7 +4557,7 @@ define amdgpu_kernel void @elim_redun_check_v2(ptr addrspace(1) %out, <2 x float
; GISEL-DAZ-NEXT: v_mul_f32_e32 v3, 0x37800000, v1
; GISEL-DAZ-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GISEL-DAZ-NEXT: v_cmp_class_f32_e32 vcc, v0, v4
-; GISEL-DAZ-NEXT: v_mov_b32_e32 v3, 0x80000000
+; GISEL-DAZ-NEXT: v_bfrev_b32_e32 v3, 1
; GISEL-DAZ-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc
; GISEL-DAZ-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GISEL-DAZ-NEXT: v_cmp_lt_f32_e32 vcc, s6, v3
@@ -4665,7 +4665,7 @@ define amdgpu_kernel void @elim_redun_check_v2_ult(ptr addrspace(1) %out, <2 x f
; GISEL-IEEE-NEXT: v_mul_f32_e32 v3, 0x37800000, v1
; GISEL-IEEE-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GISEL-IEEE-NEXT: v_cmp_class_f32_e32 vcc, v0, v4
-; GISEL-IEEE-NEXT: v_mov_b32_e32 v3, 0x80000000
+; GISEL-IEEE-NEXT: v_bfrev_b32_e32 v3, 1
; GISEL-IEEE-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc
; GISEL-IEEE-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GISEL-IEEE-NEXT: v_cmp_nge_f32_e32 vcc, s6, v3
@@ -4761,7 +4761,7 @@ define amdgpu_kernel void @elim_redun_check_v2_ult(ptr addrspace(1) %out, <2 x f
; GISEL-DAZ-NEXT: v_mul_f32_e32 v3, 0x37800000, v1
; GISEL-DAZ-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GISEL-DAZ-NEXT: v_cmp_class_f32_e32 vcc, v0, v4
-; GISEL-DAZ-NEXT: v_mov_b32_e32 v3, 0x80000000
+; GISEL-DAZ-NEXT: v_bfrev_b32_e32 v3, 1
; GISEL-DAZ-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc
; GISEL-DAZ-NEXT: v_mov_b32_e32 v4, 0x7fc00000
; GISEL-DAZ-NEXT: v_cmp_nge_f32_e32 vcc, s6, v3
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
index f5dbaaf..68482ca 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
@@ -6515,7 +6515,7 @@ define amdgpu_kernel void @atomic_load_i64_neg_offset(ptr addrspace(1) %in, ptr
; CI: ; %bb.0: ; %entry
; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: v_mov_b32_e32 v0, 0xffffffe0
+; CI-NEXT: v_not_b32_e32 v0, 31
; CI-NEXT: v_mov_b32_e32 v1, -1
; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
index 8d0397c..f1a4fe0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.intersect_ray.ll
@@ -366,7 +366,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4
; GFX1013-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
; GFX1013-NEXT: flat_load_dword v2, v[0:1]
; GFX1013-NEXT: v_mov_b32_e32 v0, 0xb36211c7
-; GFX1013-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1013-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1013-NEXT: image_bvh64_intersect_ray v[0:3], v[0:11], s[0:3]
; GFX1013-NEXT: s_waitcnt vmcnt(0)
@@ -391,7 +391,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4
; GFX1030-NEXT: v_add_co_u32 v0, s4, s4, v0
; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, null, s5, 0, s4
; GFX1030-NEXT: flat_load_dword v2, v[0:1]
-; GFX1030-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1030-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1030-NEXT: v_mov_b32_e32 v0, 0xb36211c7
; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1030-NEXT: image_bvh64_intersect_ray v[0:3], v[0:11], s[0:3]
@@ -408,7 +408,8 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_nsa_reassign(ptr %p_ray, <4
; GFX11-NEXT: v_dual_mov_b32 v3, 0x40400000 :: v_dual_mov_b32 v4, 4.0
; GFX11-NEXT: v_dual_mov_b32 v5, 0x40a00000 :: v_dual_mov_b32 v6, 0
; GFX11-NEXT: v_dual_mov_b32 v8, 2.0 :: v_dual_mov_b32 v9, 0xb36211c7
-; GFX11-NEXT: v_dual_mov_b32 v10, 0x102 :: v_dual_mov_b32 v7, 1.0
+; GFX11-NEXT: v_bfrev_b32_e32 v10, 4.0
+; GFX11-NEXT: v_mov_b32_e32 v7, 1.0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, s4, s4, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -456,7 +457,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
; GFX1013-NEXT: v_add_co_ci_u32_e64 v1, s4, s5, 0, s4
; GFX1013-NEXT: flat_load_dword v2, v[0:1]
; GFX1013-NEXT: v_mov_b32_e32 v0, 0xb36211c6
-; GFX1013-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1013-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1013-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1013-NEXT: image_bvh64_intersect_ray v[0:3], v[0:8], s[0:3] a16
; GFX1013-NEXT: s_waitcnt vmcnt(0)
@@ -478,7 +479,7 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
; GFX1030-NEXT: v_add_co_u32 v0, s4, s4, v0
; GFX1030-NEXT: v_add_co_ci_u32_e64 v1, null, s5, 0, s4
; GFX1030-NEXT: flat_load_dword v2, v[0:1]
-; GFX1030-NEXT: v_mov_b32_e32 v1, 0x102
+; GFX1030-NEXT: v_bfrev_b32_e32 v1, 4.0
; GFX1030-NEXT: v_mov_b32_e32 v0, 0xb36211c6
; GFX1030-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX1030-NEXT: image_bvh64_intersect_ray v[0:3], v[0:8], s[0:3] a16
@@ -492,8 +493,10 @@ define amdgpu_kernel void @image_bvh64_intersect_ray_a16_nsa_reassign(ptr %p_ray
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x34
; GFX11-NEXT: v_dual_mov_b32 v2, 0x48004500 :: v_dual_mov_b32 v5, 2.0
-; GFX11-NEXT: v_dual_mov_b32 v4, 1.0 :: v_dual_mov_b32 v7, 0x102
-; GFX11-NEXT: v_dual_mov_b32 v6, 0xb36211c6 :: v_dual_mov_b32 v3, 0
+; GFX11-NEXT: v_mov_b32_e32 v4, 1.0
+; GFX11-NEXT: v_mov_b32_e32 v6, 0xb36211c6
+; GFX11-NEXT: v_bfrev_b32_e32 v7, 4.0
+; GFX11-NEXT: v_mov_b32_e32 v3, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, s4, s4, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
diff --git a/llvm/test/CodeGen/AMDGPU/med3-knownbits.ll b/llvm/test/CodeGen/AMDGPU/med3-knownbits.ll
index 8953d37..fe0ab81 100644
--- a/llvm/test/CodeGen/AMDGPU/med3-knownbits.ll
+++ b/llvm/test/CodeGen/AMDGPU/med3-knownbits.ll
@@ -76,7 +76,7 @@ define i32 @v_known_signbits_smed3(i16 %a, i16 %b) {
; SI-GISEL: ; %bb.0:
; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-GISEL-NEXT: v_bfe_i32 v1, v1, 0, 16
-; SI-GISEL-NEXT: v_mov_b32_e32 v2, 0xffffffc0
+; SI-GISEL-NEXT: v_not_b32_e32 v2, 63
; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0x80
; SI-GISEL-NEXT: v_med3_i32 v1, v1, v2, v3
; SI-GISEL-NEXT: v_ashrrev_i32_e32 v2, 31, v1
@@ -84,7 +84,7 @@ define i32 @v_known_signbits_smed3(i16 %a, i16 %b) {
; SI-GISEL-NEXT: v_xor_b32_e32 v1, v1, v2
; SI-GISEL-NEXT: v_cvt_f32_u32_e32 v3, v1
; SI-GISEL-NEXT: v_sub_i32_e32 v5, vcc, 0, v1
-; SI-GISEL-NEXT: v_mov_b32_e32 v4, 0xffffffe0
+; SI-GISEL-NEXT: v_not_b32_e32 v4, 31
; SI-GISEL-NEXT: v_rcp_iflag_f32_e32 v3, v3
; SI-GISEL-NEXT: v_bfe_i32 v0, v0, 0, 16
; SI-GISEL-NEXT: v_med3_i32 v0, v0, v4, 64
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 352c1ec..048a775 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -1427,7 +1427,7 @@ define hidden void @or_store_div(ptr addrspace(1) %in0, ptr addrspace(1) %in1, i
; GFX10-NEXT: global_load_dword v4, v[2:3], off
; GFX10-NEXT: global_load_dword v9, v[0:1], off
; GFX10-NEXT: v_mov_b32_e32 v0, 16
-; GFX10-NEXT: v_mov_b32_e32 v2, 0x102
+; GFX10-NEXT: v_bfrev_b32_e32 v2, 4.0
; GFX10-NEXT: s_waitcnt vmcnt(1)
; GFX10-NEXT: v_lshlrev_b16 v1, 8, v4
; GFX10-NEXT: v_lshrrev_b32_sdwa v0, v0, v4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll b/llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll
index 1a55bf6..c5fc510 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll
@@ -2627,7 +2627,7 @@ define amdgpu_kernel void @v_test_v2i16_x_add_neg32_neg32(ptr addrspace(1) %out,
; VI-GISEL: ; %bb.0:
; VI-GISEL-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-GISEL-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v4, 0xffffffe0
+; VI-GISEL-NEXT: v_not_b32_e32 v4, 31
; VI-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; VI-GISEL-NEXT: v_mov_b32_e32 v0, s2
; VI-GISEL-NEXT: v_mov_b32_e32 v1, s3
@@ -2758,7 +2758,7 @@ define amdgpu_kernel void @v_test_v2i16_x_add_0_neg32(ptr addrspace(1) %out, ptr
; VI-GISEL-NEXT: flat_load_dword v3, v[0:1]
; VI-GISEL-NEXT: v_mov_b32_e32 v0, s0
; VI-GISEL-NEXT: v_add_u32_e32 v0, vcc, v0, v2
-; VI-GISEL-NEXT: v_mov_b32_e32 v2, 0xffffffe0
+; VI-GISEL-NEXT: v_not_b32_e32 v2, 31
; VI-GISEL-NEXT: v_mov_b32_e32 v1, s1
; VI-GISEL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-GISEL-NEXT: s_waitcnt vmcnt(0)
@@ -3963,7 +3963,7 @@ define amdgpu_kernel void @v_test_v2i16_x_add_undef_neg32(ptr addrspace(1) %out,
; VI-GISEL-NEXT: flat_load_dword v3, v[0:1]
; VI-GISEL-NEXT: v_mov_b32_e32 v0, s0
; VI-GISEL-NEXT: v_add_u32_e32 v0, vcc, v0, v2
-; VI-GISEL-NEXT: v_mov_b32_e32 v2, 0xffffffe0
+; VI-GISEL-NEXT: v_not_b32_e32 v2, 31
; VI-GISEL-NEXT: v_mov_b32_e32 v1, s1
; VI-GISEL-NEXT: s_and_b32 s0, 0xffff, s0
; VI-GISEL-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
@@ -4106,7 +4106,7 @@ define amdgpu_kernel void @v_test_v2i16_x_add_neg32_undef(ptr addrspace(1) %out,
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0xffffffe0
+; GFX9-GISEL-NEXT: v_not_b32_e32 v2, 31
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-GISEL-NEXT: global_load_dword v1, v0, s[2:3]
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-insts-scalar-bit-ops.mir b/llvm/test/CodeGen/AMDGPU/shrink-insts-scalar-bit-ops.mir
index dcfe4db..eb7b9b6 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-insts-scalar-bit-ops.mir
+++ b/llvm/test/CodeGen/AMDGPU/shrink-insts-scalar-bit-ops.mir
@@ -55,3 +55,25 @@ body: |
S_ENDPGM 0, implicit $sgpr4
...
+
+# Make sure we don't clobber the live value in scc by replacing the
+# s_mov_b32 imm with s_not_b32 ~imm, since s_not_b32 defines scc.
+---
+name: scalar_immediate_bitwise_not_live_scc
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr4, $sgpr5
+ ; CHECK-LABEL: name: scalar_immediate_bitwise_not_live_scc
+ ; CHECK: liveins: $sgpr4, $sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_CMP_EQ_U32 killed renamable $sgpr4, killed renamable $sgpr5, implicit-def $scc
+ ; CHECK-NEXT: $sgpr6 = S_MOV_B32 -1065353217
+ ; CHECK-NEXT: $sgpr7 = S_CSELECT_B32 1, 2, implicit $scc
+ ; CHECK-NEXT: S_ENDPGM 0, implicit $sgpr6
+ S_CMP_EQ_U32 killed renamable $sgpr4, killed renamable $sgpr5, implicit-def $scc
+ $sgpr6 = S_MOV_B32 -1065353217
+ $sgpr7 = S_CSELECT_B32 1, 2, implicit $scc
+ S_ENDPGM 0, implicit $sgpr6
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 48b9c72..84906ac 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -1366,7 +1366,7 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_add_u32 s1, 0, 0xaaaa0000
-; GCN-NEXT: v_mov_b32_e32 v0, 0xffffffe8
+; GCN-NEXT: v_not_b32_e32 v0, 23
; GCN-NEXT: v_mul_hi_u32 v0, s1, v0
; GCN-NEXT: s_addc_u32 s8, 0, 42
; GCN-NEXT: s_add_i32 s8, s8, 0xaaaaa80
@@ -1513,7 +1513,7 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_add_u32 s4, 0, 0xaaaa0000
-; GCN-NEXT: v_mov_b32_e32 v2, 0xffffffe8
+; GCN-NEXT: v_not_b32_e32 v2, 23
; GCN-NEXT: v_mul_hi_u32 v2, s4, v2
; GCN-NEXT: s_addc_u32 s5, 0, 42
; GCN-NEXT: s_add_i32 s5, s5, 0xaaaaa80
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index f355898..c0c84d4 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -963,7 +963,7 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_add_u32 s0, 0, 0xaaaa0000
-; GCN-NEXT: v_mov_b32_e32 v0, 0xffffffe8
+; GCN-NEXT: v_not_b32_e32 v0, 23
; GCN-NEXT: v_mul_hi_u32 v0, s0, v0
; GCN-NEXT: s_addc_u32 s1, 0, 42
; GCN-NEXT: s_add_i32 s1, s1, 0xaaaaa80