author     David Green <david.green@arm.com>        2024-04-19 09:30:13 +0100
committer  Tom Stellard <tstellar@redhat.com>       2024-04-30 16:01:41 -0700
commit     a96b04442c9fc29cd884b56bf07af8615191176f (patch)
tree       ed2d45aa6b2bec04df262fdaf44b894504deb6ea
parent     aea091b70edaf5b53bdd37f5ee6351c1642b07cc (diff)
[AArch64] Remove invalid uabdl patterns. (#89272)
These were added in https://reviews.llvm.org/D14208, and look like an attempt to detect abs from xor+add+ashr. They do not appear to detect the correct value for the src input, though, which I think is intended to be the sub(zext, zext) part of the pattern. We now have patterns for abs, so the old invalid patterns can be removed.

Fixes #88784

(cherry picked from commit 851462fcaa7f6e3301865de84f98be7e872e64b6)
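
For reference, the xor+add+ashr sequence the removed patterns were meant to recognize is the standard branch-free absolute-value idiom over i16 lanes: abs(x) = (x + (x >> 15)) ^ (x >> 15), using an arithmetic shift. The idiom only computes |x| when the shifted value is the same value being adjusted, which is exactly what the removed patterns failed to check. A minimal LLVM IR sketch of the correct form (the function name is hypothetical, not part of this commit):

; Branch-free abs over <8 x i16>. %sign is all-ones for negative
; lanes and zero otherwise; (x + %sign) ^ %sign then yields |x|.
; The removed TableGen patterns shifted an unrelated $src instead
; of the sub(zext, zext) value, so they also matched non-abs code.
define <8 x i16> @abs_idiom(<8 x i16> %x) {
  %sign = ashr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %add = add <8 x i16> %x, %sign
  %abs = xor <8 x i16> %add, %sign
  ret <8 x i16> %abs
}

This corresponds to the pr88784_fixed test added below, where the ashr operand is the usubl result itself, so selection can legitimately use uabdl; in pr88784 the shift is of an unrelated third operand and must not become uabdl.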
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.td |  10 -
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-vabs.ll     |  48 +
2 files changed, 48 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 03baa74..ac61dd8 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4885,19 +4885,9 @@ defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
(zext (v8i8 V64:$opB))))),
(UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
- (v8i16 (add (sub (zext (v8i8 V64:$opA)),
- (zext (v8i8 V64:$opB))),
- (AArch64vashr v8i16:$src, (i32 15))))),
- (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
(zext (extract_high_v16i8 (v16i8 V128:$opB)))))),
(UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
- (v8i16 (add (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
- (zext (extract_high_v16i8 (v16i8 V128:$opB)))),
- (AArch64vashr v8i16:$src, (i32 15))))),
- (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
(zext (v4i16 V64:$opB))))),
(UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index fe4da2e..89c8d54 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1848,3 +1848,51 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
%absel = select <2 x i1> %abcmp, <2 x i128> %ababs, <2 x i128> %abdiff
ret <2 x i128> %absel
}
+
+define <8 x i16> @pr88784(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
+; CHECK-SD-LABEL: pr88784:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: usubl.8h v0, v0, v1
+; CHECK-SD-NEXT: cmlt.8h v1, v2, #0
+; CHECK-SD-NEXT: ssra.8h v0, v2, #15
+; CHECK-SD-NEXT: eor.16b v0, v1, v0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: pr88784:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: usubl.8h v0, v0, v1
+; CHECK-GI-NEXT: sshr.8h v1, v2, #15
+; CHECK-GI-NEXT: ssra.8h v0, v2, #15
+; CHECK-GI-NEXT: eor.16b v0, v1, v0
+; CHECK-GI-NEXT: ret
+ %l4 = zext <8 x i8> %l0 to <8 x i16>
+ %l5 = ashr <8 x i16> %l2, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %l6 = zext <8 x i8> %l1 to <8 x i16>
+ %l7 = sub <8 x i16> %l4, %l6
+ %l8 = add <8 x i16> %l5, %l7
+ %l9 = xor <8 x i16> %l5, %l8
+ ret <8 x i16> %l9
+}
+
+define <8 x i16> @pr88784_fixed(<8 x i8> %l0, <8 x i8> %l1, <8 x i16> %l2) {
+; CHECK-SD-LABEL: pr88784_fixed:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: uabdl.8h v0, v0, v1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: pr88784_fixed:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: usubl.8h v0, v0, v1
+; CHECK-GI-NEXT: sshr.8h v1, v0, #15
+; CHECK-GI-NEXT: ssra.8h v0, v0, #15
+; CHECK-GI-NEXT: eor.16b v0, v1, v0
+; CHECK-GI-NEXT: ret
+ %l4 = zext <8 x i8> %l0 to <8 x i16>
+ %l6 = zext <8 x i8> %l1 to <8 x i16>
+ %l7 = sub <8 x i16> %l4, %l6
+ %l5 = ashr <8 x i16> %l7, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %l8 = add <8 x i16> %l5, %l7
+ %l9 = xor <8 x i16> %l5, %l8
+ ret <8 x i16> %l9
+}
+