diff options
author | Philip Reames <preames@rivosinc.com> | 2024-02-05 14:24:02 -0800 |
---|---|---|
committer | Philip Reames <listmail@philipreames.com> | 2024-02-05 14:30:10 -0800 |
commit | e722d9662dd8cdd3be9e434b057593e97a7d4417 (patch) | |
tree | dcc235916bc4513e1a48a2924bd0fd739598fbd6 | |
parent | 6b42625b1f983f6aafb9f4fe2953970c73963603 (diff) | |
download | llvm-e722d9662dd8cdd3be9e434b057593e97a7d4417.zip llvm-e722d9662dd8cdd3be9e434b057593e97a7d4417.tar.gz llvm-e722d9662dd8cdd3be9e434b057593e97a7d4417.tar.bz2 |
[DAG] Avoid a crash when checking size of scalable type in visitANDLike
Fixes https://github.com/llvm/llvm-project/issues/80744. This transform
doesn't handle vectors at all. The fixed-length ones pass the first
check, but would fail the constant operand checks which immediately follow.
This patch takes the simplest approach, and just guards the transform
for scalar integers.
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 | ||||
-rw-r--r-- | llvm/test/CodeGen/RISCV/and-add-lsr.ll | 58 |
2 files changed, 57 insertions, 3 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 3ce45e0..7f91de1 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -6376,7 +6376,7 @@ SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) { // TODO: Rewrite this to return a new 'AND' instead of using CombineTo. if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && - VT.getSizeInBits() <= 64 && N0->hasOneUse()) { + VT.isScalarInteger() && VT.getSizeInBits() <= 64 && N0->hasOneUse()) { if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal diff --git a/llvm/test/CodeGen/RISCV/and-add-lsr.ll b/llvm/test/CodeGen/RISCV/and-add-lsr.ll index c2c7a49..22ac9a2 100644 --- a/llvm/test/CodeGen/RISCV/and-add-lsr.ll +++ b/llvm/test/CodeGen/RISCV/and-add-lsr.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+v < %s \ ; RUN: | FileCheck %s -check-prefix=RV32I -; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+v < %s \ ; RUN: | FileCheck %s -check-prefix=RV64I define i32 @and_add_lsr(i32 %x, i32 %y) { @@ -23,3 +23,57 @@ define i32 @and_add_lsr(i32 %x, i32 %y) { %r = and i32 %2, %1 ret i32 %r } + +; Make sure we don't crash on fixed length vectors +define <2 x i32> @and_add_lsr_vec(<2 x i32> %x, <2 x i32> %y) { +; RV32I-LABEL: and_add_lsr_vec: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a0, 1 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32I-NEXT: vadd.vx v8, v8, a0 +; RV32I-NEXT: vsrl.vi v9, v9, 20 +; RV32I-NEXT: vand.vv v8, v9, v8 +; RV32I-NEXT: ret +; +; RV64I-LABEL: 
and_add_lsr_vec: +; RV64I: # %bb.0: +; RV64I-NEXT: lui a0, 1 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64I-NEXT: vadd.vx v8, v8, a0 +; RV64I-NEXT: vsrl.vi v9, v9, 20 +; RV64I-NEXT: vand.vv v8, v9, v8 +; RV64I-NEXT: ret + %1 = add <2 x i32> %x, splat (i32 4095) + %2 = lshr <2 x i32> %y, splat (i32 20) + %r = and <2 x i32> %2, %1 + ret <2 x i32> %r +} + +; Make sure we don't crash on scalable vectors +define <vscale x 2 x i32> @and_add_lsr_vec2(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) { +; RV32I-LABEL: and_add_lsr_vec2: +; RV32I: # %bb.0: +; RV32I-NEXT: lui a0, 1 +; RV32I-NEXT: addi a0, a0, -1 +; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV32I-NEXT: vadd.vx v8, v8, a0 +; RV32I-NEXT: vsrl.vi v9, v9, 20 +; RV32I-NEXT: vand.vv v8, v9, v8 +; RV32I-NEXT: ret +; +; RV64I-LABEL: and_add_lsr_vec2: +; RV64I: # %bb.0: +; RV64I-NEXT: lui a0, 1 +; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, ma +; RV64I-NEXT: vadd.vx v8, v8, a0 +; RV64I-NEXT: vsrl.vi v9, v9, 20 +; RV64I-NEXT: vand.vv v8, v9, v8 +; RV64I-NEXT: ret + %1 = add <vscale x 2 x i32> %x, splat (i32 4095) + %2 = lshr <vscale x 2 x i32> %y, splat (i32 20) + %r = and <vscale x 2 x i32> %2, %1 + ret <vscale x 2 x i32> %r +} |