aboutsummaryrefslogtreecommitdiff
path: root/mlir
diff options
context:
space:
mode:
authorAart Bik <39774503+aartbik@users.noreply.github.com>2023-12-06 09:35:30 -0800
committerGitHub <noreply@github.com>2023-12-06 09:35:30 -0800
commit5b0db27aceb46e66d9c14e950a546cba5fb33a2a (patch)
treea33489d37c6dee505b1187c26e5f1555ee37ce72 /mlir
parent341a51aac4b344c96d9d8774cf97a57cad76a9f4 (diff)
downloadllvm-5b0db27aceb46e66d9c14e950a546cba5fb33a2a.zip
llvm-5b0db27aceb46e66d9c14e950a546cba5fb33a2a.tar.gz
llvm-5b0db27aceb46e66d9c14e950a546cba5fb33a2a.tar.bz2
[mlir][sparse] remove LoopOrd type (#74540)
Rationale: We no longer deal with topsort during sparsification, so LoopId == LoopOrd for all methods. This first revision removes the `LoopOrd` type. A follow-up revision will simplify some other remaining constructs that deal with loop order (e.g. `at` and `ldx`).
Diffstat (limited to 'mlir')
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp5
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h9
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h43
-rw-r--r--mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp14
4 files changed, 19 insertions, 52 deletions
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp
index cc05f1d..312aefc 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.cpp
@@ -206,9 +206,8 @@ void CodegenEnv::updateInsertionChain(Value chain) {
insChain = chain;
}
-// FIXME: clarify what this "rank" is really supposed to mean/be.
-bool CodegenEnv::atExpandLevel(OpOperand *o, unsigned rank, LoopOrd n) const {
- return sparseOut == o && outerParNest == static_cast<LoopOrd>(rank - 1) &&
+bool CodegenEnv::atExpandLevel(OpOperand *o, unsigned rank, LoopId n) const {
+ return sparseOut == o && outerParNest == static_cast<LoopId>(rank - 1) &&
outerParNest == n;
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
index 7e825dd..27dc407 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenEnv.h
@@ -118,9 +118,7 @@ public:
/// It also sets the sparseOut if the output tensor is sparse.
bool isAdmissibleTensorExp(ExprId e);
- /// Returns the induction-variable for the loop identified by the given
- /// `LoopId`. This method handles application of the topological sort
- /// in order to convert the `LoopId` into the corresponding `LoopOrd`.
+ /// Returns the induction-variable for the given loop.
Value getLoopVar(LoopId i) const;
//
@@ -133,8 +131,7 @@ public:
Value getInsertionChain() const { return insChain; }
void updateInsertionChain(Value chain);
- // FIXME: clarify what this "rank" is really supposed to mean/be.
- bool atExpandLevel(OpOperand *o, unsigned rank, LoopOrd n) const;
+ bool atExpandLevel(OpOperand *o, unsigned rank, LoopId n) const;
void startExpand(Value values, Value filled, Value added, Value count);
bool isExpand() const { return expValues != nullptr; }
void updateExpandCount(Value count);
@@ -180,7 +177,7 @@ private:
// expansion in the innermost loop nest (`expValues` through `expCount`).
OpOperand *sparseOut;
// The count of outer non-filter loops, as defined by `isAdmissibleTopoOrder`.
- LoopOrd outerParNest;
+ LoopId outerParNest;
Value insChain;
Value expValues;
Value expFilled;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
index e3e620b..0bdd9d4 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
@@ -19,31 +19,9 @@
namespace mlir {
namespace sparse_tensor {
-//===----------------------------------------------------------------------===//
-/// The position of a loop in the loop-stack, or the position of a
-/// `LoopId` in a topologically-sorted list of `LoopId`s.
-///
-/// Although this type may have the same cardinality as `LoopId`, it must
-/// not be confused with that type. The `LoopId` type is used by the `Merger`
-/// as a unique identifier for loop-variables, regardless of the ordering
-/// of those loops. Whereas the `LoopOrd` type is used by the `LoopEmitter`
-/// (and `CodegenEnv`) to refer to the actual order in which loops are
-/// generated.
-///
-/// TODO: further explicate the correspondences between these various
-/// types. In particular, since the `$dim` argument to `linalg::IndexOp`
-/// is a De Bruijn index, it seems like that should correspond to `LoopOrd`,
-/// and yet the `Merger` has that correspond with `LoopId` instead.
-/// In addition `LoopEmitter::genAffine` has `AffineDimExpr::position`
correspond to `LoopId`; however, it is unclear what the provenance
-/// of those `AffineDimExpr` is.
-//
-// TODO: use a struct/class rather than a typedef, so that we can actually
-// typecheck this to avoid mixups in the code.
-using LoopOrd = unsigned;
-
// A compressed <tensor id, level> pair.
using TensorLevel = unsigned;
+
//===----------------------------------------------------------------------===//
// SparseTensorLoopEmitter class, manages sparse tensors and helps to
// generate loop structure to (co)-iterate sparse tensors.
@@ -108,9 +86,7 @@ public:
/// to the position of that tensor `Value` in the array). Setting
/// `isSparseOut` indicates that the sparse output tensor is empty,
/// so the loop emitter will generate loops over it according to the
- /// level-sizes. The `topSort` array specifies the actual order in
- /// which loops are generated, thus providing a mapping from `LoopOrd`
- /// to `LoopId`.
+ /// level-sizes.
void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
bool hasOutput = false, bool isSparseOut = false,
unsigned numLoops = 0, DependentLvlGetter getter = nullptr);
@@ -193,21 +169,16 @@ public:
}
/// Fills the out-parameter with the loop induction variables for all
- /// loops in the current loop-stack. The variables are given in the
- /// same order as the loop-stack, hence `ivs` should be indexed into
- /// by `LoopOrd` (not `LoopId`).
+ /// loops in the current loop-stack.
SmallVector<Value> getLoopIVs() const {
return llvm::to_vector(getLoopIVsRange());
}
- /// Gets the current depth of the loop-stack. The result is given
- /// the type `LoopOrd` for the same reason as one-past-the-end iterators.
- LoopOrd getCurrentDepth() const {
- return llvm::range_size(getLoopIVsRange());
- }
+ /// Gets the current depth of the loop-stack.
+ LoopId getCurrentDepth() const { return llvm::range_size(getLoopIVsRange()); }
- /// Gets loop induction variable for the given `LoopOrd`.
- Value getLoopIV(LoopOrd n) const {
+ /// Gets loop induction variable for the given loop
+ Value getLoopIV(LoopId n) const {
if (n >= getCurrentDepth())
return Value();
auto it = getLoopIVsRange().begin();
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
index d171087..d03e961 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -411,7 +411,7 @@ static void genInsertionStore(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
Location loc = op.getLoc();
// Direct insertion in lexicographic coordinate order.
if (!env.isExpand()) {
- const LoopOrd numLoops = op.getRank(t);
+ const LoopId numLoops = op.getRank(t);
// Retrieves the first `numLoops` induction variables.
SmallVector<Value> ivs = llvm::to_vector(
llvm::drop_end(env.emitter().getLoopIVsRange(),
@@ -713,7 +713,7 @@ static void genInvariants(CodegenEnv &env, OpBuilder &builder, ExprId exp,
}
/// Generates an expanded access pattern in innermost dimension.
-static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
+static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopId at,
bool atStart) {
linalg::GenericOp op = env.op();
OpOperand *lhs = op.getDpsInitOperand(0);
@@ -740,7 +740,7 @@ static void genExpand(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
r.getResult(3));
} else {
SmallVector<Value> indices;
- for (LoopOrd i = 0; i < at; i++)
+ for (LoopId i = 0; i < at; i++)
indices.push_back(env.emitter().getLoopIV(i));
Value values = env.getExpandValues();
Value filled = env.getExpandFilled();
@@ -815,7 +815,7 @@ static Operation *genCoIteration(CodegenEnv &env, OpBuilder &builder,
/// Generates a for-loop or a while-loop, depending on whether it implements
/// singleton iteration or co-iteration over the given conjunction.
-static Operation *genLoop(CodegenEnv &env, OpBuilder &builder, LoopOrd at,
+static Operation *genLoop(CodegenEnv &env, OpBuilder &builder, LoopId at,
bool needsUniv, ArrayRef<TensorLevel> tidLvls) {
bool tryParallel = shouldTryParallize(env, at, at == 0, tidLvls);
return genCoIteration(env, builder, at, tidLvls, tryParallel, needsUniv);
@@ -943,7 +943,7 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp,
/// Starts a loop sequence at given level. Returns true if
/// the universal loop index must be maintained at this level.
static bool startLoopSeq(CodegenEnv &env, OpBuilder &builder, ExprId exp,
- LoopOrd idx, LoopId ldx, LatSetId lts) {
+ LoopId idx, LoopId ldx, LatSetId lts) {
assert(!env.getLoopVar(idx));
// Emit invariants at this loop sequence level.
genInvariants(env, builder, exp, ldx, /*atStart=*/true);
@@ -1127,7 +1127,7 @@ static bool translateBitsToTidLvlPairs(
/// Starts a single loop in current sequence.
static std::pair<Operation *, bool> startLoop(CodegenEnv &env,
- OpBuilder &builder, LoopOrd at,
+ OpBuilder &builder, LoopId at,
LatPointId li, bool needsUniv) {
// The set of tensors + lvls to generate loops on
SmallVector<TensorLevel> tidLvls;
@@ -1199,7 +1199,7 @@ static void endLoopSeq(CodegenEnv &env, OpBuilder &builder, unsigned exp,
/// to manage the complexity of implementing co-iteration over unions
/// and intersections of sparse iterations spaces.
static void genStmt(CodegenEnv &env, RewriterBase &rewriter, ExprId exp,
- LoopOrd at) {
+ LoopId at) {
// At each leaf, assign remaining tensor (sub)expression to output tensor.
if (at == env.getLoopNum()) {
Value rhs = genExp(env, rewriter, exp);