Diffstat (limited to 'mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp')
-rw-r--r--  mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp | 54
1 file changed, 23 insertions(+), 31 deletions(-)
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index e645afe..fc0515b 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -195,43 +195,25 @@ DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
return res;
}
-/// Given:
-/// 1. an induction variable `iv` of type AffineForOp;
-/// 2. a `memoryOp` of type const LoadOp& or const StoreOp&;
-/// determines whether `memoryOp` has a contiguous access along `iv`. Contiguous
-/// is defined as either invariant or varying only along a unique MemRef dim.
-/// Upon success, the unique MemRef dim is written in `memRefDim` (or -1 to
-/// convey the memRef access is invariant along `iv`).
-///
-/// Prerequisites:
-/// 1. `memRefDim` ~= nullptr;
-/// 2. `iv` of the proper type;
-/// 3. the MemRef accessed by `memoryOp` has no layout map or at most an
-/// identity layout map.
-///
-/// Currently only supports no layoutMap or identity layoutMap in the MemRef.
-/// Returns false if the MemRef has a non-identity layoutMap or more than 1
-/// layoutMap. This is conservative.
-///
-// TODO: check strides.
+// TODO: check access stride.
template <typename LoadOrStoreOp>
-static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
- int *memRefDim) {
- static_assert(
- llvm::is_one_of<LoadOrStoreOp, AffineLoadOp, AffineStoreOp>::value,
- "Must be called on either LoadOp or StoreOp");
+bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
+ int *memRefDim) {
+ static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
+ AffineWriteOpInterface>::value,
+ "Must be called on either an affine read or write op");
assert(memRefDim && "memRefDim == nullptr");
auto memRefType = memoryOp.getMemRefType();
if (!memRefType.getLayout().isIdentity())
- return memoryOp.emitError("NYI: non-trivial layoutMap"), false;
+ return memoryOp.emitError("NYI: non-trivial layout map"), false;
int uniqueVaryingIndexAlongIv = -1;
auto accessMap = memoryOp.getAffineMap();
SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
unsigned numDims = accessMap.getNumDims();
for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
- // Gather map operands used result expr 'i' in 'exprOperands'.
+ // Gather map operands used in result expr 'i' in 'exprOperands'.
SmallVector<Value, 4> exprOperands;
auto resultExpr = accessMap.getResult(i);
resultExpr.walk([&](AffineExpr expr) {
@@ -241,7 +223,7 @@ static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
});
// Check access invariance of each operand in 'exprOperands'.
- for (auto exprOperand : exprOperands) {
+ for (Value exprOperand : exprOperands) {
if (!isAccessIndexInvariant(iv, exprOperand)) {
if (uniqueVaryingIndexAlongIv != -1) {
// 2+ varying indices -> do not vectorize along iv.
@@ -259,6 +241,13 @@ static bool isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
return true;
}
+template bool mlir::affine::isContiguousAccess(Value iv,
+ AffineReadOpInterface loadOp,
+ int *memRefDim);
+template bool mlir::affine::isContiguousAccess(Value iv,
AffineWriteOpInterface storeOp,
+ int *memRefDim);
+
template <typename LoadOrStoreOp>
static bool isVectorElement(LoadOrStoreOp memoryOp) {
auto memRefType = memoryOp.getMemRefType();
@@ -344,10 +333,13 @@ bool mlir::affine::isVectorizableLoopBody(
auto load = dyn_cast<AffineLoadOp>(op);
auto store = dyn_cast<AffineStoreOp>(op);
int thisOpMemRefDim = -1;
- bool isContiguous = load ? isContiguousAccess(loop.getInductionVar(), load,
- &thisOpMemRefDim)
- : isContiguousAccess(loop.getInductionVar(), store,
- &thisOpMemRefDim);
+ bool isContiguous =
+ load ? isContiguousAccess(loop.getInductionVar(),
+ cast<AffineReadOpInterface>(*load),
+ &thisOpMemRefDim)
+ : isContiguousAccess(loop.getInductionVar(),
+ cast<AffineWriteOpInterface>(*store),
+ &thisOpMemRefDim);
if (thisOpMemRefDim != -1) {
// If memory accesses vary across different dimensions then the loop is
// not vectorizable.
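
Note: with isContiguousAccess now exposed in the mlir::affine namespace, callers outside LoopAnalysis.cpp can query contiguity directly. Below is a minimal sketch of such a caller, assuming the declaration is surfaced in mlir/Dialect/Affine/Analysis/LoopAnalysis.h as part of this change; the helper name checkLoadContiguity and the walk over loads are illustrative, not part of the patch.

#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;
using namespace mlir::affine;

// Illustrative helper: for each affine load nested under `forOp`, check
// whether its access is contiguous along the loop's induction variable.
static void checkLoadContiguity(AffineForOp forOp) {
  forOp.walk([&](AffineLoadOp loadOp) {
    int memRefDim = -1;
    if (isContiguousAccess(forOp.getInductionVar(),
                           cast<AffineReadOpInterface>(*loadOp), &memRefDim)) {
      // memRefDim == -1: the access is invariant along the induction variable;
      // otherwise it varies along exactly one memref dimension, memRefDim.
    }
  });
}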