//===- BufferDeallocationOpInterfaceImpl.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/SCF/Transforms/BufferDeallocationOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/IR/BufferDeallocationOpInterface.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
using namespace mlir;
using namespace mlir::bufferization;
namespace {
/// The `scf.forall.in_parallel` terminator is special in a few ways:
/// * It does not implement the BranchOpInterface or
///   RegionBranchTerminatorOpInterface, but the InParallelOpInterface
///   which is not supported by BufferDeallocation.
/// * It has a graph-like region which only allows one specific tensor op
/// * After bufferization the nested region is always empty
/// For these reasons we provide custom deallocation logic via this external
/// model.
///
/// Example:
/// ```mlir
/// scf.forall (%arg1) in (%arg0) {
///   %alloc = memref.alloc() : memref<2xf32>
///   ...
///   <implicit in_parallel terminator here>
/// }
/// ```
/// gets transformed to
/// ```mlir
/// scf.forall (%arg1) in (%arg0) {
///   %alloc = memref.alloc() : memref<2xf32>
///   ...
///   bufferization.dealloc (%alloc : memref<2xf32>) if (%true)
///   <implicit in_parallel terminator here>
/// }
/// ```
struct InParallelDeallocOpInterface
    : public BufferDeallocationOpInterface::ExternalModel<
          InParallelDeallocOpInterface, scf::InParallelOp> {
  FailureOr<Operation *> process(Operation *op, DeallocationState &state,
                                 const DeallocationOptions &options) const {
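    // After bufferization the nested `in_parallel` region is expected to be
    // empty, so the terminator has no operands whose ownership would need to
    // be forwarded.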
    auto inParallelOp = cast<scf::InParallelOp>(op);
    if (!inParallelOp.getBody()->empty())
      return op->emitError("only supported when nested region is empty");
    SmallVector<Value> updatedOperandOwnership;
    return deallocation_impl::insertDeallocOpForReturnLike(
        state, op, {}, updatedOperandOwnership);
  }
};

struct ReduceReturnOpInterface
    : public BufferDeallocationOpInterface::ExternalModel<
          ReduceReturnOpInterface, scf::ReduceReturnOp> {
  FailureOr<Operation *> process(Operation *op, DeallocationState &state,
                                 const DeallocationOptions &options) const {
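    // Ownership tracking for memref values returned from a reduction region
    // is not implemented here, so bail out if the returned value is a memref.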
    auto reduceReturnOp = cast<scf::ReduceReturnOp>(op);
    if (isa<BaseMemRefType>(reduceReturnOp.getOperand().getType()))
      return op->emitError("only supported when operand is not a MemRef");

    SmallVector<Value> updatedOperandOwnership;
    return deallocation_impl::insertDeallocOpForReturnLike(
        state, op, {}, updatedOperandOwnership);
  }
};
} // namespace

void mlir::scf::registerBufferDeallocationOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, SCFDialect *dialect) {
    InParallelOp::attachInterface<InParallelDeallocOpInterface>(*ctx);
    ReduceReturnOp::attachInterface<ReduceReturnOpInterface>(*ctx);
  });
}