Diffstat (limited to 'mlir/lib/Dialect')
-rw-r--r--  mlir/lib/Dialect/Arith/Utils/Utils.cpp | 6
-rw-r--r--  mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp | 13
-rw-r--r--  mlir/lib/Dialect/GPU/IR/GPUDialect.cpp | 3
-rw-r--r--  mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp | 4
-rw-r--r--  mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp | 33
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp | 5
-rw-r--r--  mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp | 5
-rw-r--r--  mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp | 2
-rw-r--r--  mlir/lib/Dialect/Shard/IR/ShardOps.cpp | 12
-rw-r--r--  mlir/lib/Dialect/Tosa/IR/TosaOps.cpp | 7
-rw-r--r--  mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp | 7
-rw-r--r--  mlir/lib/Dialect/Utils/IndexingUtils.cpp | 25
-rw-r--r--  mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp | 10
-rw-r--r--  mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 3
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp | 6
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp | 4
-rw-r--r--  mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp | 3
17 files changed, 67 insertions(+), 81 deletions(-)
diff --git a/mlir/lib/Dialect/Arith/Utils/Utils.cpp b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
index b1fc9aa..f54baff 100644
--- a/mlir/lib/Dialect/Arith/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Arith/Utils/Utils.cpp
@@ -351,9 +351,9 @@ Value createProduct(OpBuilder &builder, Location loc, ArrayRef<Value> values,
Value one = ConstantOp::create(builder, loc, resultType,
builder.getOneAttr(resultType));
ArithBuilder arithBuilder(builder, loc);
- return std::accumulate(
- values.begin(), values.end(), one,
- [&arithBuilder](Value acc, Value v) { return arithBuilder.mul(acc, v); });
+ return llvm::accumulate(values, one, [&arithBuilder](Value acc, Value v) {
+ return arithBuilder.mul(acc, v);
+ });
}
/// Map strings to float types.
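[Note: llvm::accumulate (llvm/ADT/STLExtras.h) is the range-based counterpart of std::accumulate; the three-argument form used above takes an initial value and a binary fold. A minimal standalone sketch of the same idiom, with a hypothetical `nums`:

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  int productOfNums() {
    std::vector<int> nums = {2, 3, 4};
    // Equivalent to std::accumulate(nums.begin(), nums.end(), 1, ...).
    return llvm::accumulate(nums, 1,
                            [](int acc, int v) { return acc * v; }); // 24
  }
]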
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
index a50ddbe..624519f 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
@@ -55,16 +55,6 @@ static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
return returnOp;
}
-/// Return the func::FuncOp called by `callOp`.
-static func::FuncOp getCalledFunction(CallOpInterface callOp) {
- SymbolRefAttr sym =
- llvm::dyn_cast_if_present<SymbolRefAttr>(callOp.getCallableForCallee());
- if (!sym)
- return nullptr;
- return dyn_cast_or_null<func::FuncOp>(
- SymbolTable::lookupNearestSymbolFrom(callOp, sym));
-}
-
LogicalResult
mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
IRRewriter rewriter(module.getContext());
@@ -72,7 +62,8 @@ mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
DenseMap<func::FuncOp, DenseSet<func::CallOp>> callerMap;
// Collect the mapping of functions to their call sites.
module.walk([&](func::CallOp callOp) {
- if (func::FuncOp calledFunc = getCalledFunction(callOp)) {
+ if (func::FuncOp calledFunc =
+ dyn_cast_or_null<func::FuncOp>(callOp.resolveCallable())) {
callerMap[calledFunc].insert(callOp);
}
});
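[Note: the replacement leans on CallOpInterface::resolveCallable(), which performs the nearest-symbol-table lookup the deleted getCalledFunction() helper did by hand. A sketch of the idiom, assuming any op implementing CallOpInterface:

  // resolveCallable() returns the resolved callee Operation*, or nullptr
  // when the callee cannot be found, so dyn_cast_or_null preserves the
  // old helper's null-safety.
  if (auto calledFunc =
          dyn_cast_or_null<func::FuncOp>(callOp.resolveCallable())) {
    // ... record or rewrite the call site ...
  }
]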
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 19eba6b..b5f8dda 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -2460,8 +2460,7 @@ static LogicalResult verifyDistributedType(Type expanded, Type distributed,
<< dDim << ")";
scales[i] = eDim / dDim;
}
- if (std::accumulate(scales.begin(), scales.end(), 1,
- std::multiplies<int64_t>()) != warpSize)
+ if (llvm::product_of(scales) != warpSize)
return op->emitOpError()
<< "incompatible distribution dimensions from " << expandedVecType
<< " to " << distributedVecType << " with warp size = " << warpSize;
diff --git a/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp b/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp
index 88f531f..572b746 100644
--- a/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp
+++ b/mlir/lib/Dialect/GPU/Utils/DistributionUtils.cpp
@@ -15,6 +15,7 @@
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Value.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include <numeric>
@@ -118,8 +119,7 @@ bool WarpDistributionPattern::delinearizeLaneId(
return false;
sizes.push_back(large / small);
}
- if (std::accumulate(sizes.begin(), sizes.end(), 1,
- std::multiplies<int64_t>()) != warpSize)
+ if (llvm::product_of(sizes) != warpSize)
return false;
AffineExpr s0, s1;
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 7f419a0..5edcc40b 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -1593,6 +1593,39 @@ mlir::NVVM::IDArgPair CpAsyncBulkPrefetchOp::getIntrinsicIDAndArgs(
return {id, std::move(args)};
}
+mlir::NVVM::IDArgPair CpAsyncBulkGlobalToSharedClusterOp::getIntrinsicIDAndArgs(
+ Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
+ auto thisOp = cast<NVVM::CpAsyncBulkGlobalToSharedClusterOp>(op);
+ llvm::SmallVector<llvm::Value *> args;
+
+ // Fill the Intrinsic Args: dst, mbar, src, size.
+ args.push_back(mt.lookupValue(thisOp.getDstMem()));
+ args.push_back(mt.lookupValue(thisOp.getMbar()));
+ args.push_back(mt.lookupValue(thisOp.getSrcMem()));
+ args.push_back(mt.lookupValue(thisOp.getSize()));
+
+ // Multicast mask, if available.
+ mlir::Value multicastMask = thisOp.getMulticastMask();
+ const bool hasMulticastMask = static_cast<bool>(multicastMask);
+ llvm::Value *i16Unused = llvm::ConstantInt::get(builder.getInt16Ty(), 0);
+ args.push_back(hasMulticastMask ? mt.lookupValue(multicastMask) : i16Unused);
+
+ // Cache hint, if available.
+ mlir::Value cacheHint = thisOp.getL2CacheHint();
+ const bool hasCacheHint = static_cast<bool>(cacheHint);
+ llvm::Value *i64Unused = llvm::ConstantInt::get(builder.getInt64Ty(), 0);
+ args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
+
+ // Flag arguments for multicast and cachehint.
+ args.push_back(builder.getInt1(hasMulticastMask));
+ args.push_back(builder.getInt1(hasCacheHint));
+
+ llvm::Intrinsic::ID id =
+ llvm::Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster;
+
+ return {id, std::move(args)};
+}
+
mlir::NVVM::IDArgPair CpAsyncBulkSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
auto thisOp = cast<NVVM::CpAsyncBulkSharedCTAToGlobalOp>(op);
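[Note: the new lowering follows a common pattern for optional intrinsic operands: an absent value is replaced by a typed zero placeholder, and a trailing i1 flag tells the intrinsic whether the slot is live. A generic sketch of the pattern, with a hypothetical accessor `getSomeOptional`:

  mlir::Value opt = thisOp.getSomeOptional();      // null if not specified
  const bool hasOpt = static_cast<bool>(opt);
  llvm::Value *unused = llvm::ConstantInt::get(builder.getInt64Ty(), 0);
  args.push_back(hasOpt ? mt.lookupValue(opt) : unused); // value or placeholder
  args.push_back(builder.getInt1(hasOpt));               // liveness flag
]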
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp
index f277c5f..0ae2a9c 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ShardingInterfaceImpl.cpp
@@ -266,9 +266,8 @@ struct StructuredOpShardingInterface
LinalgOp linalgOp = llvm::cast<LinalgOp>(op);
SmallVector<utils::IteratorType> iteratorTypes =
linalgOp.getIteratorTypesArray();
- unsigned reductionItersCount = std::accumulate(
- iteratorTypes.begin(), iteratorTypes.end(), 0,
- [](unsigned count, utils::IteratorType iter) {
+ unsigned reductionItersCount = llvm::accumulate(
+ iteratorTypes, 0u, [](unsigned count, utils::IteratorType iter) {
return count + (iter == utils::IteratorType::reduction);
});
shard::ReductionKind reductionKind = getReductionKindOfLinalgOp(linalgOp);
diff --git a/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp b/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp
index b663908..8c4f80f 100644
--- a/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp
+++ b/mlir/lib/Dialect/Quant/Utils/UniformSupport.cpp
@@ -8,6 +8,7 @@
#include "mlir/Dialect/Quant/Utils/UniformSupport.h"
#include "mlir/IR/BuiltinTypes.h"
+#include "llvm/ADT/STLExtras.h"
#include <numeric>
using namespace mlir;
@@ -76,9 +77,7 @@ UniformQuantizedPerAxisValueConverter::convert(DenseFPElementsAttr attr) {
// using the right quantization parameters.
int64_t flattenIndex = 0;
auto shape = type.getShape();
- int64_t chunkSize =
- std::accumulate(std::next(shape.begin(), quantizationDim + 1),
- shape.end(), 1, std::multiplies<int64_t>());
+ int64_t chunkSize = llvm::product_of(shape.drop_front(quantizationDim + 1));
Type newElementType = IntegerType::get(attr.getContext(), storageBitWidth);
return attr.mapValues(newElementType, [&](const APFloat &old) {
int chunkIndex = (flattenIndex++) / chunkSize;
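[Note: ArrayRef::drop_front(n) (and its mirror take_front(n), used in the Vector hunks below) replaces the std::next iterator arithmetic with a shape-suffix view. A standalone sketch:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/STLExtras.h"

  // Product of all dimensions after `axis`, e.g. shape = {2, 3, 4, 5} with
  // axis = 1 yields 4 * 5 = 20 elements per quantized chunk.
  int64_t chunkSizeAfter(llvm::ArrayRef<int64_t> shape, int64_t axis) {
    return llvm::product_of(shape.drop_front(axis + 1));
  }
]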
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index 5511998..fe50865 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -400,7 +400,7 @@ LogicalResult spirv::CompositeConstructOp::verify() {
return emitOpError("operand element type mismatch: expected to be ")
<< resultType.getElementType() << ", but provided " << elementType;
}
- unsigned totalCount = std::accumulate(sizes.begin(), sizes.end(), 0);
+ unsigned totalCount = llvm::sum_of(sizes);
if (totalCount != cType.getNumElements())
return emitOpError("has incorrect number of operands: expected ")
<< cType.getNumElements() << ", but provided " << totalCount;
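[Note: llvm::sum_of is the addition counterpart of product_of, folding the range with + from a zero of the element type. Toy sketch with a hypothetical `sizes`:

  unsigned operandCount(llvm::ArrayRef<unsigned> sizes) {
    return llvm::sum_of(sizes); // e.g. {2, 3, 1} -> 6
  }
]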
diff --git a/mlir/lib/Dialect/Shard/IR/ShardOps.cpp b/mlir/lib/Dialect/Shard/IR/ShardOps.cpp
index 08fccfa..135c033 100644
--- a/mlir/lib/Dialect/Shard/IR/ShardOps.cpp
+++ b/mlir/lib/Dialect/Shard/IR/ShardOps.cpp
@@ -1010,18 +1010,6 @@ static LogicalResult verifyInGroupDevice(Location loc, StringRef deviceName,
return success();
}
-template <typename It>
-static auto product(It begin, It end) {
- using ElementType = std::decay_t<decltype(*begin)>;
- return std::accumulate(begin, end, static_cast<ElementType>(1),
- std::multiplies<ElementType>());
-}
-
-template <typename R>
-static auto product(R &&range) {
- return product(adl_begin(range), adl_end(range));
-}
-
static LogicalResult verifyDimensionCompatibility(Location loc,
int64_t expectedDimSize,
int64_t resultDimSize,
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index c51b5e9..00f84bc 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -2368,9 +2368,10 @@ llvm::LogicalResult tosa::ReshapeOp::verify() {
}
}
- int64_t newShapeElementsNum = std::accumulate(
- shapeValues.begin(), shapeValues.end(), 1LL,
- [](int64_t acc, int64_t dim) { return (dim > 0) ? acc * dim : acc; });
+ int64_t newShapeElementsNum =
+ llvm::accumulate(shapeValues, int64_t(1), [](int64_t acc, int64_t dim) {
+ return (dim > 0) ? acc * dim : acc;
+ });
bool isStaticNewShape =
llvm::all_of(shapeValues, [](int64_t s) { return s > 0; });
if ((isStaticNewShape && inputElementsNum != newShapeElementsNum) ||
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
index d33ebe3..5786f53 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp
@@ -20,6 +20,7 @@
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectResourceBlobManager.h"
#include "mlir/IR/Matchers.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace mlir;
@@ -375,8 +376,7 @@ llvm::APInt calculateReducedValue(const mlir::ElementsAttr &oldTensorAttr,
for (int64_t reductionAxisVal = 1; reductionAxisVal < oldShape[reductionAxis];
++reductionAxisVal) {
- int64_t stride = std::accumulate(oldShape.begin() + reductionAxis + 1,
- oldShape.end(), 1, std::multiplies<int>());
+ int64_t stride = llvm::product_of(oldShape.drop_front(reductionAxis + 1));
int64_t index = indexAtOldTensor + stride * reductionAxisVal;
reducedValue =
OperationType::calcOneElement(reducedValue, oldTensor[index]);
@@ -424,8 +424,7 @@ struct ReduceConstantOptimization : public OpRewritePattern<OperationType> {
auto oldShape = shapedOldElementsValues.getShape();
auto newShape = resultType.getShape();
- auto newNumOfElements = std::accumulate(newShape.begin(), newShape.end(), 1,
- std::multiplies<int>());
+ int64_t newNumOfElements = llvm::product_of(newShape);
llvm::SmallVector<APInt> newReducedTensor(newNumOfElements);
for (int64_t reductionIndex = 0; reductionIndex < newNumOfElements;
diff --git a/mlir/lib/Dialect/Utils/IndexingUtils.cpp b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
index e1648ab9..305b06eb 100644
--- a/mlir/lib/Dialect/Utils/IndexingUtils.cpp
+++ b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
@@ -81,21 +81,10 @@ SmallVector<int64_t> mlir::computeElementwiseMul(ArrayRef<int64_t> v1,
return computeElementwiseMulImpl(v1, v2);
}
-int64_t mlir::computeSum(ArrayRef<int64_t> basis) {
- assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
- "basis must be nonnegative");
- if (basis.empty())
- return 0;
- return std::accumulate(basis.begin(), basis.end(), 1, std::plus<int64_t>());
-}
-
int64_t mlir::computeProduct(ArrayRef<int64_t> basis) {
assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
"basis must be nonnegative");
- if (basis.empty())
- return 1;
- return std::accumulate(basis.begin(), basis.end(), 1,
- std::multiplies<int64_t>());
+ return llvm::product_of(basis);
}
int64_t mlir::linearize(ArrayRef<int64_t> offsets, ArrayRef<int64_t> basis) {
@@ -158,19 +147,11 @@ SmallVector<AffineExpr> mlir::computeElementwiseMul(ArrayRef<AffineExpr> v1,
}
AffineExpr mlir::computeSum(MLIRContext *ctx, ArrayRef<AffineExpr> basis) {
- if (basis.empty())
- return getAffineConstantExpr(0, ctx);
- return std::accumulate(basis.begin(), basis.end(),
- getAffineConstantExpr(0, ctx),
- std::plus<AffineExpr>());
+ return llvm::sum_of(basis, getAffineConstantExpr(0, ctx));
}
AffineExpr mlir::computeProduct(MLIRContext *ctx, ArrayRef<AffineExpr> basis) {
- if (basis.empty())
- return getAffineConstantExpr(1, ctx);
- return std::accumulate(basis.begin(), basis.end(),
- getAffineConstantExpr(1, ctx),
- std::multiplies<AffineExpr>());
+ return llvm::product_of(basis, getAffineConstantExpr(1, ctx));
}
AffineExpr mlir::linearize(MLIRContext *ctx, ArrayRef<AffineExpr> offsets,
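[Note: for element types without a usable default zero/one, such as AffineExpr, both helpers take an explicit initial value, which also subsumes the deleted empty-range early returns:

  // sum_of(basis, getAffineConstantExpr(0, ctx))     -> the 0 expr if empty
  // product_of(basis, getAffineConstantExpr(1, ctx)) -> the 1 expr if empty
]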
diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
index 7b2734d..6e9118e 100644
--- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
+++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -374,11 +374,11 @@ mlir::composeReassociationIndices(
if (consumerReassociations.empty())
return composedIndices;
- size_t consumerDims = std::accumulate(
- consumerReassociations.begin(), consumerReassociations.end(), 0,
- [](size_t all, ReassociationIndicesRef indices) {
- return all + indices.size();
- });
+ size_t consumerDims =
+ llvm::accumulate(consumerReassociations, size_t(0),
+ [](size_t all, ReassociationIndicesRef indices) {
+ return all + indices.size();
+ });
if (producerReassociations.size() != consumerDims)
return std::nullopt;
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index a7e3ba8..58256b0 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -2496,8 +2496,7 @@ struct ToElementsOfBroadcast final : OpRewritePattern<ToElementsOp> {
auto srcElems = vector::ToElementsOp::create(
rewriter, toElementsOp.getLoc(), bcastOp.getSource());
- int64_t dstCount = std::accumulate(dstShape.begin(), dstShape.end(), 1,
- std::multiplies<int64_t>());
+ int64_t dstCount = llvm::product_of(dstShape);
SmallVector<Value> replacements;
replacements.reserve(dstCount);
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
index c5f22b2..0eba0b1 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp
@@ -21,6 +21,7 @@
#include "mlir/IR/Location.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
+#include "llvm/ADT/STLExtras.h"
#include <numeric>
#define DEBUG_TYPE "vector-shape-cast-lowering"
@@ -166,10 +167,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern<vector::ShapeCastOp> {
const VectorType resultType = shapeCast.getResultVectorType();
const ArrayRef<int64_t> resultShape = resultType.getShape();
- const int64_t nSlices =
- std::accumulate(sourceShape.begin(), sourceShape.begin() + sourceDim, 1,
- std::multiplies<int64_t>());
-
+ const int64_t nSlices = llvm::product_of(sourceShape.take_front(sourceDim));
SmallVector<int64_t> extractIndex(sourceDim, 0);
SmallVector<int64_t> insertIndex(resultDim, 0);
Value result = ub::PoisonOp::create(rewriter, loc, resultType);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
index 963b2c8..aa2dd89 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp
@@ -15,6 +15,7 @@
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/TypeUtilities.h"
+#include "llvm/ADT/STLExtras.h"
#define DEBUG_TYPE "vector-drop-unit-dim"
@@ -557,8 +558,7 @@ struct CastAwayConstantMaskLeadingOneDim
// If any of the dropped unit dims has a size of `0`, the entire mask is a
// zero mask, else the unit dim has no effect on the mask.
int64_t flatLeadingSize =
- std::accumulate(dimSizes.begin(), dimSizes.begin() + dropDim + 1,
- static_cast<int64_t>(1), std::multiplies<int64_t>());
+ llvm::product_of(dimSizes.take_front(dropDim + 1));
SmallVector<int64_t> newDimSizes = {flatLeadingSize};
newDimSizes.append(dimSizes.begin() + dropDim + 1, dimSizes.end());
diff --git a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
index b72d564..2c56a43 100644
--- a/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
+++ b/mlir/lib/Dialect/XeGPU/Utils/XeGPUUtils.cpp
@@ -52,8 +52,7 @@ mlir::xegpu::getDistributedVectorType(xegpu::TensorDescType tdescTy) {
// compute sgSize by multiply elements of laneLayout
// e.g. for 2D layout, sgSize = laneLayout[0] * laneLayout[1]
// e.g. for 1D layout, sgSize = laneLayout[0]
- auto sgSize = std::accumulate(laneLayout.begin(), laneLayout.end(), 1,
- std::multiplies<int64_t>());
+ int64_t sgSize = llvm::product_of(laneLayout);
// Case 1: regular loads/stores
auto scatterAttr = tdescTy.getEncodingOfType<ScatterTensorDescAttr>();