author     Aart Bik <ajcbik@google.com>    2024-03-14 17:06:39 -0700
committer  GitHub <noreply@github.com>     2024-03-14 17:06:39 -0700
commit     4daf86ef3f9a6781eedbe5d5a161e4f1e75df323 (patch)
tree       32585d79ec873ceccb1ff1dd3522a35d13dc4388 /mlir/include
parent     e4b772444c8176abe30d364e4a946ee6c8ae8de4 (diff)
[mlir][sparse] refactoring sparse runtime lib into less paths (#85332)
Two constructors can easily be refactored into one now that a lot of previously deprecated code has been removed.
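
The shape of the change, as a minimal C++ sketch (the names `Coo` and `Storage` are illustrative stand-ins, not the actual MLIR classes): the former `initializeValuesIfAllDense` flag and the separate COO-reference constructor collapse into one constructor that takes an optional pointer, where `nullptr` means "construct an empty tensor" and a non-null pointer means "initialize from that COO".

```cpp
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Illustrative stand-in for SparseTensorCOO: a list of (coordinates, value)
// elements that can be sorted lexicographically by coordinates.
struct Coo {
  std::vector<std::pair<std::vector<uint64_t>, double>> elements;
  void sort() { std::sort(elements.begin(), elements.end()); }
};

class Storage {
public:
  // One constructor instead of two:
  //   coo == nullptr -> empty tensor (dense values zero-initialized),
  //   coo != nullptr -> contents taken from the sorted COO.
  Storage(uint64_t denseSize, Coo *coo) {
    if (coo) {
      coo->sort();
      values.reserve(coo->elements.size());
      for (const auto &e : coo->elements)   // simplified stand-in for fromCOO()
        values.push_back(e.second);
    } else {
      values.resize(denseSize, 0.0);
    }
  }

private:
  std::vector<double> values;
};
```
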
Diffstat (limited to 'mlir/include')
-rw-r--r--  mlir/include/mlir/ExecutionEngine/SparseTensor/File.h    |  2
-rw-r--r--  mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h | 88
2 files changed, 33 insertions(+), 57 deletions(-)
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
index f927b82..714e664 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
@@ -206,7 +206,7 @@ public:
auto *lvlCOO = readCOO<V>(map, lvlSizes);
auto *tensor = SparseTensorStorage<P, I, V>::newFromCOO(
dimRank, getDimSizes(), lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim,
- *lvlCOO);
+ lvlCOO);
delete lvlCOO;
return tensor;
}
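
With `newFromCOO` now taking a pointer, the caller in `File.h` keeps ownership of the COO and frees it once the storage has been built. Below is a minimal sketch of that borrow-and-delete pattern, using `unique_ptr` to make the ownership explicit; `Coo`, `Storage`, and `readTensor` are hypothetical stand-ins, not the MLIR API.

```cpp
#include <memory>
#include <vector>

// Hypothetical stand-ins; only the ownership pattern matters here.
struct Coo {
  std::vector<double> values;
};

struct Storage {
  // Borrows `coo` during construction; does not take ownership of it.
  explicit Storage(const Coo *coo) : values(coo->values) {}
  std::vector<double> values;
};

Storage *readTensor() {
  // The reader allocates a fresh COO...
  auto coo = std::make_unique<Coo>();
  coo->values = {1.0, 2.0, 3.0};
  // ...hands a non-owning pointer to the storage constructor...
  auto *tensor = new Storage(coo.get());
  // ...and the COO is released here when `coo` goes out of scope, mirroring
  // the explicit `delete lvlCOO` in the hunk above.
  return tensor;
}
```
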
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index b207fc1..773957a 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -201,33 +201,18 @@ class SparseTensorStorage final : public SparseTensorStorageBase {
public:
/// Constructs a sparse tensor with the given encoding, and allocates
- /// overhead storage according to some simple heuristics. When the
- /// `bool` argument is true and `lvlTypes` are all dense, then this
- /// ctor will also initialize the values array with zeros. That
- /// argument should be true when an empty tensor is intended; whereas
- /// it should usually be false when the ctor will be followed up by
- /// some other form of initialization.
+ /// overhead storage according to some simple heuristics. When lvlCOO
+ /// is set, the sparse tensor initializes with the contents from that
+ /// data structure. Otherwise, an empty sparse tensor results.
SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
uint64_t lvlRank, const uint64_t *lvlSizes,
const LevelType *lvlTypes, const uint64_t *dim2lvl,
- const uint64_t *lvl2dim, SparseTensorCOO<V> *lvlCOO,
- bool initializeValuesIfAllDense);
+ const uint64_t *lvl2dim, SparseTensorCOO<V> *lvlCOO);
/// Constructs a sparse tensor with the given encoding, and initializes
- /// the contents from the COO. This ctor performs the same heuristic
- /// overhead-storage allocation as the ctor above.
- SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
- uint64_t lvlRank, const uint64_t *lvlSizes,
- const LevelType *lvlTypes, const uint64_t *dim2lvl,
- const uint64_t *lvl2dim, SparseTensorCOO<V> &lvlCOO);
-
- /// Constructs a sparse tensor with the given encoding, and initializes
- /// the contents from the level buffers. This ctor allocates exactly
- /// the required amount of overhead storage, not using any heuristics.
- /// It assumes that the data provided by `lvlBufs` can be directly used to
- /// interpret the result sparse tensor and performs *NO* integrity test on the
- /// input data. It also assume that the trailing COO coordinate buffer is
- /// passed in as a single AoS memory.
+ /// the contents from the level buffers. The constructor assumes that the
+ /// data provided by `lvlBufs` can be directly used to interpret the result
+ /// sparse tensor and performs no integrity test on the input data.
SparseTensorStorage(uint64_t dimRank, const uint64_t *dimSizes,
uint64_t lvlRank, const uint64_t *lvlSizes,
const LevelType *lvlTypes, const uint64_t *dim2lvl,
@@ -244,16 +229,14 @@ public:
newFromCOO(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
const uint64_t *dim2lvl, const uint64_t *lvl2dim,
- SparseTensorCOO<V> &lvlCOO);
+ SparseTensorCOO<V> *lvlCOO);
- /// Allocates a new sparse tensor and initialize it with the data stored level
- /// buffers directly.
+ /// Allocates a new sparse tensor and initializes it from the given buffers.
static SparseTensorStorage<P, C, V> *
- packFromLvlBuffers(uint64_t dimRank, const uint64_t *dimSizes,
- uint64_t lvlRank, const uint64_t *lvlSizes,
- const LevelType *lvlTypes, const uint64_t *dim2lvl,
- const uint64_t *lvl2dim, uint64_t srcRank,
- const intptr_t *buffers);
+ newFromBuffers(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
+ const uint64_t *lvlSizes, const LevelType *lvlTypes,
+ const uint64_t *dim2lvl, const uint64_t *lvl2dim,
+ uint64_t srcRank, const intptr_t *buffers);
~SparseTensorStorage() final = default;
@@ -563,9 +546,9 @@ SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newEmpty(
uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
const uint64_t *dim2lvl, const uint64_t *lvl2dim) {
+ SparseTensorCOO<V> *noLvlCOO = nullptr;
return new SparseTensorStorage<P, C, V>(dimRank, dimSizes, lvlRank, lvlSizes,
- lvlTypes, dim2lvl, lvl2dim, nullptr,
- true);
+ lvlTypes, dim2lvl, lvl2dim, noLvlCOO);
}
template <typename P, typename C, typename V>
@@ -573,13 +556,14 @@ SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newFromCOO(
uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
const uint64_t *dim2lvl, const uint64_t *lvl2dim,
- SparseTensorCOO<V> &lvlCOO) {
+ SparseTensorCOO<V> *lvlCOO) {
+ assert(lvlCOO);
return new SparseTensorStorage<P, C, V>(dimRank, dimSizes, lvlRank, lvlSizes,
lvlTypes, dim2lvl, lvl2dim, lvlCOO);
}
template <typename P, typename C, typename V>
-SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::packFromLvlBuffers(
+SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newFromBuffers(
uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
const uint64_t *dim2lvl, const uint64_t *lvl2dim, uint64_t srcRank,
@@ -599,10 +583,9 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
const uint64_t *dim2lvl, const uint64_t *lvl2dim,
- SparseTensorCOO<V> *lvlCOO, bool initializeValuesIfAllDense)
+ SparseTensorCOO<V> *lvlCOO)
: SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
dim2lvl, lvl2dim) {
- assert(!lvlCOO || lvlRank == lvlCOO->getRank());
// Provide hints on capacity of positions and coordinates.
// TODO: needs much fine-tuning based on actual sparsity; currently
// we reserve position/coordinate space based on all previous dense
@@ -633,27 +616,20 @@ SparseTensorStorage<P, C, V>::SparseTensorStorage(
sz = detail::checkedMul(sz, lvlSizes[l]);
}
}
- if (allDense && initializeValuesIfAllDense)
+ if (lvlCOO) {
+ /* New from COO: ensure it is sorted. */
+ assert(lvlCOO->getRank() == lvlRank);
+ lvlCOO->sort();
+ // Now actually insert the `elements`.
+ const auto &elements = lvlCOO->getElements();
+ const uint64_t nse = elements.size();
+ assert(values.size() == 0);
+ values.reserve(nse);
+ fromCOO(elements, 0, nse, 0);
+ } else if (allDense) {
+ /* New empty (all dense) */
values.resize(sz, 0);
-}
-
-template <typename P, typename C, typename V>
-SparseTensorStorage<P, C, V>::SparseTensorStorage( // NOLINT
- uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
- const uint64_t *lvlSizes, const LevelType *lvlTypes,
- const uint64_t *dim2lvl, const uint64_t *lvl2dim,
- SparseTensorCOO<V> &lvlCOO)
- : SparseTensorStorage(dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes,
- dim2lvl, lvl2dim, nullptr, false) {
- // Ensure lvlCOO is sorted.
- assert(lvlRank == lvlCOO.getRank());
- lvlCOO.sort();
- // Now actually insert the `elements`.
- const auto &elements = lvlCOO.getElements();
- const uint64_t nse = elements.size();
- assert(values.size() == 0);
- values.reserve(nse);
- fromCOO(elements, 0, nse, 0);
+ }
}
template <typename P, typename C, typename V>
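
Taken together, the header ends up with one permissive constructor fronted by named factories that encode intent and check their preconditions: `newEmpty` passes a named null pointer, while `newFromCOO` asserts non-null. A simplified sketch of that layering follows, with illustrative names and the constructor made private only to emphasize the factory-first usage (in the real class the constructors remain public).

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Coo {
  std::vector<double> values;
};

class Storage {
public:
  static Storage *newEmpty(uint64_t size) {
    Coo *noCoo = nullptr;               // named null, as in `noLvlCOO` above
    return new Storage(size, noCoo);
  }

  static Storage *newFromCoo(uint64_t size, Coo *coo) {
    assert(coo && "newFromCoo requires a COO"); // factory enforces non-null
    return new Storage(size, coo);
  }

private:
  // Permissive constructor: a null COO means "empty tensor".
  Storage(uint64_t size, Coo *coo) {
    if (coo)
      values = coo->values;             // initialize contents from the COO
    else
      values.resize(size, 0.0);         // empty: zero-initialized dense values
  }

  std::vector<double> values;
};
```
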