author | Aart Bik <39774503+aartbik@users.noreply.github.com> | 2023-12-13 15:18:35 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-12-13 15:18:35 -0800 |
commit | e52c941921e263aad75a6685caad64d6ab457bee (patch) | |
tree | 7737ff7a3d5a67c036e9215cc96debcc1d4e502d | |
parent | f0d48116904ff477b3faccc6a394f41ba8bd96f6 (diff) | |
[mlir][sparse] minor cleanup of transform/utils (#75396)
Consistent include macro naming
Modified and added comments
6 files changed, 50 insertions, 35 deletions
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
index cd626041..728af84 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
@@ -10,8 +10,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
 
 #include "CodegenUtils.h"
 #include "LoopEmitter.h"
@@ -206,4 +206,4 @@ private:
 } // namespace sparse_tensor
 } // namespace mlir
 
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
index 57de437..8dc57e1 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.h
@@ -10,8 +10,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
 
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Complex/IR/Complex.h"
@@ -434,4 +434,4 @@ inline bool isZeroRankedTensorOrScalar(Type type) {
 } // namespace sparse_tensor
 } // namespace mlir
 
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
index b601172..588400f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.cpp
@@ -1,4 +1,4 @@
-//===- LoopScheduler.cpp -------------------------------------------------===//
+//===- IterationGraphSorter.cpp -------------------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -20,11 +20,10 @@ using namespace mlir::sparse_tensor;
 
 namespace {
 
-/// A helper class that visits an affine expression and tries to find an
-/// AffineDimExpr to which the corresponding iterator from a GenericOp matches
-/// the desired iterator type.
-/// If there is no matched iterator type, returns the first DimExpr in the
-/// expression.
+/// A helper class that visits an affine expression and tries to find
+/// an AffineDimExpr to which the corresponding iterator from a GenericOp
+/// matches the desired iterator type. If there is no matched iterator
+/// type, the method returns the first DimExpr in the expression.
 class AffineDimFinder : public AffineExprVisitor<AffineDimFinder> {
 public:
   explicit AffineDimFinder(ArrayRef<utils::IteratorType> itTypes)
@@ -81,11 +80,9 @@ inline static bool includesDenseOutput(SortMask mask) {
   return includesAny(mask, SortMask::kIncludeDenseOutput);
 }
 
-/// A helper to compute a topological sort. O(n^2) time complexity
-/// as we use adj matrix for the graph.
-/// The sorted result will put the first Reduction iterator to the
-/// latest possible position.
 AffineMap IterationGraphSorter::topoSort() {
+  // The sorted result will put the first Reduction iterator to the
+  // latest possible position.
   std::vector<unsigned> redIt; // reduce iterator with 0 degree
   std::vector<unsigned> parIt; // parallel iterator with 0 degree
   const unsigned numLoops = getNumLoops();
@@ -170,6 +167,7 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
   // Reset the interation graph.
   for (auto &row : itGraph)
     std::fill(row.begin(), row.end(), false);
+  // Reset cached in-degree.
   std::fill(inDegree.begin(), inDegree.end(), 0);
@@ -179,7 +177,6 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
     // Skip dense inputs when not requested.
     if ((!enc && !includesDenseInput(mask)) || in == ignored)
       continue;
-
     addConstraints(in, map);
   }
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
index 52ee117..be94bb5 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/IterationGraphSorter.h
@@ -1,10 +1,17 @@
-//===- LoopScheduler.h -----------------------------------------*- C++ -*-===//
+//===- IterationGraphSorter.h -----------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
+//
+// This header file defines the iteration graph sorter (top-sort scheduling).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
 
 #include "mlir/IR/AffineMap.h"
 
@@ -21,7 +28,7 @@ class GenericOp;
 
 namespace sparse_tensor {
 
-/// Iteration graph sorting.
+/// Iteration graph sorting mask,
 enum class SortMask : unsigned {
   // The individual mask bits.
   kIncludeDenseOutput = 0x1, // b001
@@ -34,40 +41,52 @@ enum class SortMask : unsigned {
 
 class IterationGraphSorter {
 public:
-  // Constructs a scheduler from linalg.generic
-  // Maybe reuses the class to schedule foreach as well (to address
-  // non-permutation, e.g, traverse CSR in BSR order).
+  /// Factory method that construct an iteration graph sorter
+  /// for the given linalg.generic operation.
   static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp);
 
-  // Returns a permutation that represents the scheduled loop order.
-  // Note that the returned AffineMap could be null if the kernel can not be
-  // schedule due to cycles in the iteration graph.
+  /// Returns a permutation that represents the scheduled loop order.
+  /// Note that the returned AffineMap could be null if the kernel
+  /// cannot be scheduled due to cyclic iteration graph.
   [[nodiscard]] AffineMap sort(SortMask mask, Value ignored = nullptr);
+
+  /// Returns the number of loops in the iteration graph.
   unsigned getNumLoops() const { return loop2OutLvl.getNumDims(); }
 
 private:
+  // Private constructor.
   IterationGraphSorter(SmallVector<Value> &&ins,
                        SmallVector<AffineMap> &&loop2InsLvl, Value out,
                        AffineMap loop2OutLvl,
                        SmallVector<utils::IteratorType> &&iterTypes);
 
+  // Adds all the constraints in the given loop to level map.
   void addConstraints(Value t, AffineMap loop2LvlMap);
+
+  /// A helper to compute a topological sort. The method has an
+  /// O(n^2) time complexity since we use an adjacency matrix
+  /// representation for the iteration graph.
   AffineMap topoSort();
 
   // Input tensors and associated loop to level maps.
   SmallVector<Value> ins;
   SmallVector<AffineMap> loop2InsLvl;
+
   // Output tensor and associated loop to level map.
   Value out;
   AffineMap loop2OutLvl;
-
+
+  // Loop itation types;
   SmallVector<utils::IteratorType> iterTypes;
 
   // Adjacent matrix that represents the iteration graph.
   std::vector<std::vector<bool>> itGraph;
+
   // InDegree used for topo sort.
   std::vector<unsigned> inDegree;
 };
 
 } // namespace sparse_tensor
 } // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
index fa8b007..78bb53e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
@@ -6,8 +6,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
 
 #include <vector>
 
@@ -22,7 +22,7 @@ namespace sparse_tensor {
 // A compressed <tensor id, level> pair.
 using TensorLevel = unsigned;
 
-//===----------------------------------------------------------------------===//
+//
 // SparseTensorLoopEmiter class, manages sparse tensors and helps to
 // generate loop structure to (co)-iterate sparse tensors.
 //
@@ -48,8 +48,7 @@ using TensorLevel = unsigned;
 // loopEmiter.exitCurrentLoop(); // exit k
 // loopEmiter.exitCurrentLoop(); // exit j
 // loopEmiter.exitCurrentLoop(); // exit i
-//===----------------------------------------------------------------------===//
-
+//
 class LoopEmitter {
 public:
   /// Optional callback function to setup dense output tensors when
@@ -705,4 +704,4 @@ private:
 } // namespace sparse_tensor
 } // namespace mlir
 
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
index 5c7d8aa..3a61ec7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/SparseTensorDescriptor.h
@@ -10,8 +10,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
-#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
 
 #include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
@@ -262,4 +262,4 @@ getMutDescriptorFromTensorTuple(Value tensor, SmallVectorImpl<Value> &fields) {
 } // namespace sparse_tensor
 } // namespace mlir
 
-#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSODESCRIPTOR_H_
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSODESCRIPTOR_H_
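The SortMask hunks above show only the first mask bit (kIncludeDenseOutput = 0x1) and a caller of includesAny; the helper's body and the remaining bit values are not part of this diff. The snippet below is a minimal sketch of how such an enum-class bit mask is typically combined and tested; the extra enumerator values and the includesAny body are assumptions, not taken from the commit.

```cpp
// Hypothetical sketch only: kIncludeDenseInput/kIncludeAll values and the
// includesAny body are assumptions, not shown in the diff above.
#include <type_traits>

enum class SortMask : unsigned {
  kIncludeDenseOutput = 0x1, // b001 (shown in the diff)
  kIncludeDenseInput = 0x2,  // assumed next bit
  kIncludeAll = 0x3          // assumed union of the bits above
};

// Tests whether any of the requested bits is set in the mask.
static inline bool includesAny(SortMask mask, SortMask bits) {
  using U = std::underlying_type_t<SortMask>;
  return (static_cast<U>(mask) & static_cast<U>(bits)) != 0;
}

// Mirrors the helper called in the IterationGraphSorter.cpp hunk.
static inline bool includesDenseOutput(SortMask mask) {
  return includesAny(mask, SortMask::kIncludeDenseOutput);
}
```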
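The new topoSort() comment in IterationGraphSorter.h describes an O(n^2) topological sort over an adjacency-matrix iteration graph that emits parallel iterators eagerly and pushes reduction iterators to the latest possible position. The sketch below illustrates that scheduling idea in isolation; the function name, the bool/std::vector interface, and the tie-breaking details are illustrative assumptions, not the MLIR implementation (which returns an AffineMap permutation).

```cpp
#include <vector>

enum class IterKind { kParallel, kReduction };

// Kahn-style topological sort over an adjacency matrix `itGraph`, where
// itGraph[i][j] == true means loop i must be scheduled before loop j.
// Returns false when the graph has a cycle (the kernel cannot be
// scheduled, analogous to the null AffineMap case in the sorter).
bool topoSortSketch(const std::vector<std::vector<bool>> &itGraph,
                    const std::vector<IterKind> &iterTypes,
                    std::vector<unsigned> &order) {
  const unsigned n = itGraph.size();
  std::vector<unsigned> inDegree(n, 0);
  for (unsigned i = 0; i < n; i++)
    for (unsigned j = 0; j < n; j++)
      if (itGraph[i][j])
        inDegree[j]++;

  // Separate zero in-degree worklists so reduction iterators are only
  // emitted when no parallel iterator is available.
  std::vector<unsigned> parIt, redIt;
  for (unsigned i = 0; i < n; i++)
    if (inDegree[i] == 0)
      (iterTypes[i] == IterKind::kParallel ? parIt : redIt).push_back(i);

  while (!parIt.empty() || !redIt.empty()) {
    std::vector<unsigned> &worklist = parIt.empty() ? redIt : parIt;
    unsigned src = worklist.back();
    worklist.pop_back();
    order.push_back(src);
    // Dropping `src` lowers the in-degree of its successors; this scan is
    // O(n) per node, hence O(n^2) overall with the adjacency matrix.
    for (unsigned dst = 0; dst < n; dst++)
      if (itGraph[src][dst] && --inDegree[dst] == 0)
        (iterTypes[dst] == IterKind::kParallel ? parIt : redIt).push_back(dst);
  }
  return order.size() == n; // false => cycle in the iteration graph
}
```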