author     Amara Emerson <aemerson@apple.com>  2020-06-15 14:37:55 -0700
committer  Amara Emerson <aemerson@apple.com>  2020-06-15 14:42:14 -0700
commit     fc905ae003df4d2cf9099afbdfc3aa840a82fe10 (patch)
tree       0b536bf16ca7b893a7aec47924d18875a5ac1db1
parent     eeb96e4f198a9e212f82a5dd067e3c1af6e5876f (diff)
[GlobalISel] Don't emit multiply by magic constant for zero memset values.
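
For reference, a minimal standalone C++ sketch (the splatMemsetByte helper is
hypothetical, not LLVM code) of the two paths in getMemsetValue after this
change: the general path still replicates the memset byte across the wider
type via the 0x0101... magic multiply, while a zero value now takes an
early-out and is materialized as a plain wider constant 0 with no extend or
multiply.

    #include <cassert>
    #include <cstdint>

    // Hypothetical illustration of the scalar byte-splat logic.
    static uint64_t splatMemsetByte(uint8_t Val, unsigned NumBits) {
      assert(NumBits % 8 == 0 && NumBits <= 64 && "byte-multiple widths only");
      if (Val == 0)
        return 0; // New case: emit a larger constant 0 directly.
      uint64_t Magic = ~0ULL / 0xFF; // 0x0101010101010101
      uint64_t Mask = NumBits == 64 ? ~0ULL : ((1ULL << NumBits) - 1);
      return (uint64_t(Val) * Magic) & Mask; // e.g. 0x40 -> 0x4040...40
    }

For example, splatMemsetByte(0x40, 32) yields 0x40404040, whereas a memset
value of 0 returns 0 immediately, corresponding to the G_CONSTANT 0 the
combiner now builds (see the new test_zero_const test below).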
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp            5
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir   39
2 files changed, 44 insertions, 0 deletions
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0c34ef1..b97731f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -936,6 +936,11 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
// Extend the byte value to the larger type, and then multiply by a magic
// value 0x010101... in order to replicate it across every byte.
+ // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
+ if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
+ return MIB.buildConstant(Ty, 0).getReg(0);
+ }
+
LLT ExtType = Ty.getScalarType();
auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
if (NumBits > 8) {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
index 89bb25d..a736f57 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
@@ -21,6 +21,12 @@
ret void
}
+ define void @test_zero_const(i8* nocapture %dst) local_unnamed_addr #0 {
+ entry:
+ tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 64, i1 false)
+ ret void
+ }
+
define void @test_ms3_const_both(i8* nocapture %dst) local_unnamed_addr #0 {
entry:
tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 64, i64 16, i1 false)
@@ -102,6 +108,39 @@ body: |
...
---
+name: test_zero_const
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.1.entry:
+ liveins: $w1, $x0
+
+ ; CHECK-LABEL: name: test_zero_const
+ ; CHECK: liveins: $w1, $x0
+ ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+ ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store 16 into %ir.dst, align 1)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+ ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into %ir.dst + 16, align 1)
+ ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+ ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+ ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 into %ir.dst + 32, align 1)
+ ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+ ; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store 16 into %ir.dst + 48, align 1)
+ ; CHECK: RET_ReallyLR
+ %0:_(p0) = COPY $x0
+ %1:_(s32) = G_CONSTANT i32 0
+ %3:_(s64) = G_CONSTANT i64 64
+ %2:_(s8) = G_TRUNC %1(s32)
+ G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
+ RET_ReallyLR
+
+...
+
+---
name: test_ms3_const_both
alignment: 4
tracksRegLiveness: true