author     Tobias Gysi <tobias.gysi@nextsilicon.com>    2024-01-19 11:10:57 +0100
committer  GitHub <noreply@github.com>                  2024-01-19 11:10:57 +0100
commit     9dd0eb9c9c207e7ea17912616c5cea58aa5c514d (patch)
tree       851b42dd7a715d71a13afa5e413b0a2072357a1a /mlir
parent     3d90e1fa94d17c3b27c89731760f28791bb30943 (diff)
[mlir][llvm] Drop unreachable basic block during import (#78467)
This revision updates the LLVM IR import to support unreachable basic blocks. An unreachable block may dominate itself, so a value defined inside the block may be used before its definition. The import does not support such dependencies. We thus delete the unreachable basic blocks before the import. This is safe since MLIR has no basic block labels that an indirect branch could target, so unreachable blocks can be deleted without changing semantics. Additionally, add a small poison constant import test.
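The detection step relies on LLVM's external-set depth-first iterator. Below is a minimal standalone sketch of the idiom; the helper name collectReachableBlocks is illustrative and not part of the patch:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"

// Return the set of basic blocks reachable from the entry block of `func`.
// The external set handed to depth_first_ext doubles as the visited set, so
// after the walk it contains exactly the reachable blocks.
static llvm::df_iterator_default_set<llvm::BasicBlock *>
collectReachableBlocks(llvm::Function *func) {
  llvm::df_iterator_default_set<llvm::BasicBlock *> reachable;
  for (llvm::BasicBlock *basicBlock : llvm::depth_first_ext(func, reachable))
    (void)basicBlock; // The traversal populates `reachable` as a side effect.
  return reachable;
}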
Diffstat (limited to 'mlir')
-rw-r--r--  mlir/lib/Target/LLVMIR/ModuleImport.cpp              | 46
-rw-r--r--  mlir/test/Target/LLVMIR/Import/constant.ll           | 10
-rw-r--r--  mlir/test/Target/LLVMIR/Import/exception.ll          | 41
-rw-r--r--  mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll | 35
4 files changed, 97 insertions(+), 35 deletions(-)
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index e905408..928d807 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -26,6 +26,7 @@
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Tools/mlir-translate/Translation.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/StringSet.h"
@@ -132,18 +133,17 @@ static LogicalResult convertInstructionImpl(OpBuilder &odsBuilder,
return failure();
}
-/// Get a topologically sorted list of blocks for the given function.
+/// Get a topologically sorted list of the given basic blocks.
static SetVector<llvm::BasicBlock *>
-getTopologicallySortedBlocks(llvm::Function *func) {
+getTopologicallySortedBlocks(ArrayRef<llvm::BasicBlock *> basicBlocks) {
SetVector<llvm::BasicBlock *> blocks;
- for (llvm::BasicBlock &bb : *func) {
- if (!blocks.contains(&bb)) {
- llvm::ReversePostOrderTraversal<llvm::BasicBlock *> traversal(&bb);
+ for (llvm::BasicBlock *basicBlock : basicBlocks) {
+ if (!blocks.contains(basicBlock)) {
+ llvm::ReversePostOrderTraversal<llvm::BasicBlock *> traversal(basicBlock);
blocks.insert(traversal.begin(), traversal.end());
}
}
- assert(blocks.size() == func->size() && "some blocks are not sorted");
-
+ assert(blocks.size() == basicBlocks.size() && "some blocks are not sorted");
return blocks;
}
@@ -1859,11 +1859,26 @@ LogicalResult ModuleImport::processFunction(llvm::Function *func) {
if (func->isDeclaration())
return success();
- // Eagerly create all blocks.
- for (llvm::BasicBlock &bb : *func) {
- Block *block =
- builder.createBlock(&funcOp.getBody(), funcOp.getBody().end());
- mapBlock(&bb, block);
+ // Collect the set of basic blocks reachable from the function's entry block.
+ // This step is crucial as LLVM IR can contain unreachable blocks that
+ // self-dominate. As a result, an operation might utilize a variable it
+ // defines, which the import does not support. Given that MLIR lacks block
+ // label support, we can safely remove unreachable blocks, as there are no
+ // indirect branch instructions that could potentially target these blocks.
+ llvm::df_iterator_default_set<llvm::BasicBlock *> reachable;
+ for (llvm::BasicBlock *basicBlock : llvm::depth_first_ext(func, reachable))
+ (void)basicBlock;
+
+ // Eagerly create all reachable blocks.
+ SmallVector<llvm::BasicBlock *> reachableBasicBlocks;
+ for (llvm::BasicBlock &basicBlock : *func) {
+ // Skip unreachable blocks.
+ if (!reachable.contains(&basicBlock))
+ continue;
+ Region &body = funcOp.getBody();
+ Block *block = builder.createBlock(&body, body.end());
+ mapBlock(&basicBlock, block);
+ reachableBasicBlocks.push_back(&basicBlock);
}
// Add function arguments to the entry block.
@@ -1876,10 +1891,11 @@ LogicalResult ModuleImport::processFunction(llvm::Function *func) {
// Process the blocks in topological order. The ordered traversal ensures
// operands defined in a dominating block have a valid mapping to an MLIR
// value once a block is translated.
- SetVector<llvm::BasicBlock *> blocks = getTopologicallySortedBlocks(func);
+ SetVector<llvm::BasicBlock *> blocks =
+ getTopologicallySortedBlocks(reachableBasicBlocks);
setConstantInsertionPointToStart(lookupBlock(blocks.front()));
- for (llvm::BasicBlock *bb : blocks)
- if (failed(processBasicBlock(bb, lookupBlock(bb))))
+ for (llvm::BasicBlock *basicBlock : blocks)
+ if (failed(processBasicBlock(basicBlock, lookupBlock(basicBlock))))
return failure();
// Process the debug intrinsics that require a delayed conversion after
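Taken together, the two hunks above implement a two-step scheme: mark the blocks reachable from the entry block, then topologically sort only those blocks. A condensed sketch of that composition, simplified to omit MLIR block creation and argument mapping (the helper name sortReachableBlocks is illustrative):

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"

// Mark the blocks reachable from the entry block, then seed a reverse
// post-order traversal at every reachable block that has not been visited
// yet. Unreachable blocks never enter the result.
static llvm::SetVector<llvm::BasicBlock *>
sortReachableBlocks(llvm::Function *func) {
  llvm::df_iterator_default_set<llvm::BasicBlock *> reachable;
  for (llvm::BasicBlock *basicBlock : llvm::depth_first_ext(func, reachable))
    (void)basicBlock;

  llvm::SetVector<llvm::BasicBlock *> sorted;
  for (llvm::BasicBlock &basicBlock : *func) {
    if (!reachable.contains(&basicBlock) || sorted.contains(&basicBlock))
      continue;
    llvm::ReversePostOrderTraversal<llvm::BasicBlock *> traversal(&basicBlock);
    sorted.insert(traversal.begin(), traversal.end());
  }
  return sorted;
}

Blocks that are reachable only from dead code (for example, the target of a branch inside an unreachable block) are skipped as well, which matches the import's behavior after this change.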
diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll
index cd2d00e..3c46f5b 100644
--- a/mlir/test/Target/LLVMIR/Import/constant.ll
+++ b/mlir/test/Target/LLVMIR/Import/constant.ll
@@ -47,6 +47,16 @@ define void @undef_constant(i32 %arg0) {
; // -----
+; CHECK-LABEL: @poison_constant
+define void @poison_constant(double %arg0) {
+ ; CHECK: %[[POISON:.+]] = llvm.mlir.poison : f64
+ ; CHECK: llvm.fadd %[[POISON]], %{{.*}} : f64
+ %1 = fadd double poison, %arg0
+ ret void
+}
+
+; // -----
+
; CHECK-LABEL: @null_constant
define ptr @null_constant() {
; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr
diff --git a/mlir/test/Target/LLVMIR/Import/exception.ll b/mlir/test/Target/LLVMIR/Import/exception.ll
index de22764..440d89e 100644
--- a/mlir/test/Target/LLVMIR/Import/exception.ll
+++ b/mlir/test/Target/LLVMIR/Import/exception.ll
@@ -12,34 +12,35 @@ define i32 @invokeLandingpad() personality ptr @__gxx_personality_v0 {
; CHECK: %[[a1:[0-9]+]] = llvm.mlir.addressof @_ZTIii : !llvm.ptr
; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 {alignment = 1 : i64} : (i32) -> !llvm.ptr
%1 = alloca i8
- ; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> ()
- invoke void @foo(ptr %1) to label %4 unwind label %2
+ ; CHECK: llvm.invoke @foo(%[[a3]]) to ^[[bb1:.*]] unwind ^[[bb4:.*]] : (!llvm.ptr) -> ()
+ invoke void @foo(ptr %1) to label %bb1 unwind label %bb4
-; CHECK: ^bb1:
+; CHECK: ^[[bb1]]:
+bb1:
+ ; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^[[bb2:.*]] unwind ^[[bb4]] : (!llvm.ptr) -> !llvm.ptr
+ %2 = invoke ptr @bar(ptr %1) to label %bb2 unwind label %bb4
+
+; CHECK: ^[[bb2]]:
+bb2:
+ ; CHECK: llvm.invoke @vararg_foo(%[[a3]], %{{.*}}) to ^[[bb3:.*]] unwind ^[[bb4]] vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
+ invoke void (ptr, ...) @vararg_foo(ptr %1, i32 0) to label %bb3 unwind label %bb4
+
+; CHECK: ^[[bb3]]:
+bb3:
+ ; CHECK: llvm.invoke %{{.*}}(%[[a3]], %{{.*}}) to ^[[bb5:.*]] unwind ^[[bb4]] vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
+ invoke void (ptr, ...) undef(ptr %1, i32 0) to label %bb5 unwind label %bb4
+
+; CHECK: ^[[bb4]]:
+bb4:
; CHECK: %{{[0-9]+}} = llvm.landingpad (catch %{{[0-9]+}} : !llvm.ptr) (catch %[[a1]] : !llvm.ptr) (filter %{{[0-9]+}} : !llvm.array<1 x i1>) : !llvm.struct<(ptr, i32)>
%3 = landingpad { ptr, i32 } catch ptr @_ZTIi catch ptr @_ZTIii
filter [1 x i1] [i1 1]
resume { ptr, i32 } %3
-; CHECK: ^bb2:
+; CHECK: ^[[bb5]]:
+bb5:
; CHECK: llvm.return %{{[0-9]+}} : i32
ret i32 1
-
-; CHECK: ^bb3:
- ; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> !llvm.ptr
- %6 = invoke ptr @bar(ptr %1) to label %4 unwind label %2
-
-; CHECK: ^bb4:
- ; CHECK: llvm.invoke @vararg_foo(%[[a3]], %{{.*}}) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
- invoke void (ptr, ...) @vararg_foo(ptr %1, i32 0) to label %4 unwind label %2
-
-; CHECK: ^bb5:
- ; CHECK: llvm.invoke %{{.*}}(%[[a3]], %{{.*}}) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
- invoke void (ptr, ...) undef(ptr %1, i32 0) to label %4 unwind label %2
-
-; CHECK: ^bb6:
- ; CHECK: llvm.return %{{[0-9]+}} : i32
- ret i32 0
}
declare i32 @foo2()
diff --git a/mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll b/mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll
new file mode 100644
index 0000000..8a84f4b
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll
@@ -0,0 +1,35 @@
+; RUN: mlir-translate -import-llvm %s | FileCheck %s
+
+; Test unreachable blocks are dropped.
+
+; CHECK-LABEL: llvm.func @unreachable_block
+define void @unreachable_block(float %0) {
+.entry:
+ ; CHECK: llvm.return
+ ret void
+
+unreachable:
+ ; CHECK-NOT: llvm.fadd
+ %1 = fadd float %0, %1
+ br label %unreachable
+}
+
+; Test unreachable blocks with back edges are supported.
+
+; CHECK-LABEL: llvm.func @back_edge
+define i32 @back_edge(i32 %0) {
+.entry:
+ ; CHECK: llvm.br ^[[RET:.*]](%{{.*}})
+ br label %ret
+ret:
+ ; CHECK: ^[[RET]](%{{.*}}: i32)
+ %1 = phi i32 [ %0, %.entry ], [ %2, %unreachable ]
+ ; CHECK: llvm.return %{{.*}} : i32
+ ret i32 %1
+
+unreachable:
+ ; CHECK-NOT: add
+ %2 = add i32 %0, %2
+ %3 = icmp eq i32 %2, 42
+ br i1 %3, label %ret, label %unreachable
+}