aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorUday Bondhugula <uday@polymagelabs.com>2020-04-08 12:31:48 +0530
committerUday Bondhugula <uday@polymagelabs.com>2020-04-09 08:47:50 +0530
commitd314b7d5ca94e60f75fe23b4b052f131880e8a2f (patch)
tree8bfbb298b68cedc3b36d4b8ec9f5bfcb3234e3b8
parente7db1aec3bdb833832056894f5eba2f359a7c384 (diff)
downloadllvm-d314b7d5ca94e60f75fe23b4b052f131880e8a2f.zip
llvm-d314b7d5ca94e60f75fe23b4b052f131880e8a2f.tar.gz
llvm-d314b7d5ca94e60f75fe23b4b052f131880e8a2f.tar.bz2
[MLIR] ShapedType accessor minor fixes + add isDynamicDim accessor
Minor fixes and cleanup for the ShapedType accessors: use ShapedType::kDynamicSize in isDynamic(), and add a ShapedType::isDynamicDim accessor. Differential Revision: https://reviews.llvm.org/D77710
-rw-r--r--mlir/include/mlir/IR/StandardTypes.h10
-rw-r--r--mlir/lib/Analysis/Utils.cpp7
-rw-r--r--mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp7
-rw-r--r--mlir/lib/Dialect/Affine/IR/AffineOps.cpp2
-rw-r--r--mlir/lib/Dialect/StandardOps/IR/Ops.cpp13
-rw-r--r--mlir/lib/IR/StandardTypes.cpp11
6 files changed, 29 insertions, 21 deletions
diff --git a/mlir/include/mlir/IR/StandardTypes.h b/mlir/include/mlir/IR/StandardTypes.h
index b36b348..cc94d27d 100644
--- a/mlir/include/mlir/IR/StandardTypes.h
+++ b/mlir/include/mlir/IR/StandardTypes.h
@@ -252,7 +252,11 @@ public:
/// If this is ranked type, return the size of the specified dimension.
/// Otherwise, abort.
- int64_t getDimSize(int64_t i) const;
+ int64_t getDimSize(unsigned idx) const;
+
+ /// Returns true if this dimension has a dynamic size (for ranked types);
+ /// aborts for unranked types.
+ bool isDynamicDim(unsigned idx) const;
/// Returns the position of the dynamic dimension relative to just the dynamic
/// dimensions, given its `index` within the shape.
@@ -276,7 +280,9 @@ public:
}
/// Whether the given dimension size indicates a dynamic dimension.
- static constexpr bool isDynamic(int64_t dSize) { return dSize < 0; }
+ static constexpr bool isDynamic(int64_t dSize) {
+ return dSize == kDynamicSize;
+ }
static constexpr bool isDynamicStrideOrOffset(int64_t dStrideOrOffset) {
return dStrideOrOffset == kDynamicStrideOrOffset;
}
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 3d159f2..90b3e1f 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -330,11 +330,10 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
if (addMemRefDimBounds) {
auto memRefType = memref.getType().cast<MemRefType>();
for (unsigned r = 0; r < rank; r++) {
- cst.addConstantLowerBound(r, 0);
- int64_t dimSize = memRefType.getDimSize(r);
- if (ShapedType::isDynamic(dimSize))
+ cst.addConstantLowerBound(/*pos=*/r, /*lb=*/0);
+ if (memRefType.isDynamicDim(r))
continue;
- cst.addConstantUpperBound(r, dimSize - 1);
+ cst.addConstantUpperBound(/*pos=*/r, memRefType.getDimSize(r) - 1);
}
}
cst.removeTrivialRedundancy();
diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index d9ee9a7..a746af7 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -1888,16 +1888,15 @@ struct DimOpLowering : public ConvertOpToLLVMPattern<DimOp> {
OperandAdaptor<DimOp> transformed(operands);
MemRefType type = dimOp.getOperand().getType().cast<MemRefType>();
- auto shape = type.getShape();
int64_t index = dimOp.getIndex();
// Extract dynamic size from the memref descriptor.
- if (ShapedType::isDynamic(shape[index]))
+ if (type.isDynamicDim(index))
rewriter.replaceOp(op, {MemRefDescriptor(transformed.memrefOrTensor())
.size(rewriter, op->getLoc(), index)});
else
// Use constant for static size.
- rewriter.replaceOp(
- op, createIndexConstant(rewriter, op->getLoc(), shape[index]));
+ rewriter.replaceOp(op, createIndexConstant(rewriter, op->getLoc(),
+ type.getDimSize(index)));
return success();
}
};
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 6d0c4e9..0d03dd7 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -133,7 +133,7 @@ static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp,
unsigned index) {
auto memRefType = memrefDefOp.getType();
// Statically shaped.
- if (!ShapedType::isDynamic(memRefType.getDimSize(index)))
+ if (!memRefType.isDynamicDim(index))
return true;
// Get the position of the dimension among dynamic dimensions;
unsigned dynamicDimPos = memRefType.getDynamicDimIndex(index);
diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
index 7c7a8b0..2f0f4b1 100644
--- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
+++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp
@@ -1068,14 +1068,14 @@ static LogicalResult verify(DimOp op) {
OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
// Constant fold dim when the size along the index referred to is a constant.
auto opType = memrefOrTensor().getType();
- int64_t indexSize = ShapedType::kDynamicSize;
+ int64_t dimSize = ShapedType::kDynamicSize;
if (auto tensorType = opType.dyn_cast<RankedTensorType>())
- indexSize = tensorType.getShape()[getIndex()];
+ dimSize = tensorType.getShape()[getIndex()];
else if (auto memrefType = opType.dyn_cast<MemRefType>())
- indexSize = memrefType.getShape()[getIndex()];
+ dimSize = memrefType.getShape()[getIndex()];
- if (!ShapedType::isDynamic(indexSize))
- return IntegerAttr::get(IndexType::get(getContext()), indexSize);
+ if (!ShapedType::isDynamic(dimSize))
+ return IntegerAttr::get(IndexType::get(getContext()), dimSize);
// Fold dim to the size argument for an AllocOp/ViewOp/SubViewOp.
auto memrefType = opType.dyn_cast<MemRefType>();
@@ -2310,13 +2310,12 @@ Value ViewOp::getDynamicOffset() {
static LogicalResult verifyDynamicStrides(MemRefType memrefType,
ArrayRef<int64_t> strides) {
- ArrayRef<int64_t> shape = memrefType.getShape();
unsigned rank = memrefType.getRank();
assert(rank == strides.size());
bool dynamicStrides = false;
for (int i = rank - 2; i >= 0; --i) {
// If size at dim 'i + 1' is dynamic, set the 'dynamicStrides' flag.
- if (ShapedType::isDynamic(shape[i + 1]))
+ if (memrefType.isDynamicDim(i + 1))
dynamicStrides = true;
// If stride at dim 'i' is not dynamic, return error.
if (dynamicStrides && strides[i] != MemRefType::getDynamicStrideOrOffset())
diff --git a/mlir/lib/IR/StandardTypes.cpp b/mlir/lib/IR/StandardTypes.cpp
index 1e7d9f3..cff65e7 100644
--- a/mlir/lib/IR/StandardTypes.cpp
+++ b/mlir/lib/IR/StandardTypes.cpp
@@ -184,9 +184,14 @@ int64_t ShapedType::getRank() const { return getShape().size(); }
bool ShapedType::hasRank() const { return !isa<UnrankedTensorType>(); }
-int64_t ShapedType::getDimSize(int64_t i) const {
- assert(i >= 0 && i < getRank() && "invalid index for shaped type");
- return getShape()[i];
+int64_t ShapedType::getDimSize(unsigned idx) const {
+ assert(idx < getRank() && "invalid index for shaped type");
+ return getShape()[idx];
+}
+
+bool ShapedType::isDynamicDim(unsigned idx) const {
+ assert(idx < getRank() && "invalid index for shaped type");
+ return isDynamic(getShape()[idx]);
}
unsigned ShapedType::getDynamicDimIndex(unsigned index) const {