author    | River Riddle <riverriddle@google.com> | 2019-05-11 18:59:54 -0700
committer | Mehdi Amini <joker.eph@gmail.com> | 2019-05-20 13:37:52 -0700
commit    | d5b60ee8407d12e51b017c6390af5c17683713e1 (patch)
tree      | 19f0378cf0c5f571d041bc3814449bf3de292bc5 /mlir/lib/Transforms
parent    | adca3c2edcdd1375d8c421816ec53044537ccd64 (diff)
Replace Operation::isa with llvm::isa.
--
PiperOrigin-RevId: 247789235
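
The change itself is mechanical: every use of the `Operation::isa<OpTy>()` member function is rewritten to the free function `llvm::isa<OpTy>(...)`, which accepts an `Operation &` or `Operation *` directly (block iterators are passed as `&*it`). A minimal before/after sketch of the pattern follows; the helper name and include paths are illustrative assumptions, not part of this patch.

```cpp
// Illustrative only: a hypothetical helper showing the rewrite this commit
// applies across mlir/lib/Transforms. Include paths are approximate for the
// MLIR tree of this era and may differ at other revisions.
#include "mlir/IR/Operation.h"
#include "mlir/StandardOps/Ops.h"  // LoadOp, StoreOp (assumed location)
#include "llvm/Support/Casting.h"  // llvm::isa

using namespace mlir;

static bool isLoadOrStore(Operation &op) {
  // Before: member-function form on Operation.
  //   return op.isa<LoadOp>() || op.isa<StoreOp>();
  // After: LLVM-style free function; the same call also works on Operation*.
  return llvm::isa<LoadOp>(op) || llvm::isa<StoreOp>(op);
}
```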
Diffstat (limited to 'mlir/lib/Transforms')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mlir/lib/Transforms/DmaGeneration.cpp | 4 |
| -rw-r--r-- | mlir/lib/Transforms/LoopFusion.cpp | 26 |
| -rw-r--r-- | mlir/lib/Transforms/LoopInvariantCodeMotion.cpp | 4 |
| -rw-r--r-- | mlir/lib/Transforms/LoopUnroll.cpp | 2 |
| -rw-r--r-- | mlir/lib/Transforms/LoopUnrollAndJam.cpp | 4 |
| -rw-r--r-- | mlir/lib/Transforms/LowerAffine.cpp | 3 |
| -rw-r--r-- | mlir/lib/Transforms/MaterializeVectors.cpp | 10 |
| -rw-r--r-- | mlir/lib/Transforms/MemRefDataFlowOpt.cpp | 6 |
| -rw-r--r-- | mlir/lib/Transforms/PipelineDataTransfer.cpp | 12 |
| -rw-r--r-- | mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp | 2 |
| -rw-r--r-- | mlir/lib/Transforms/Utils/LoopUtils.cpp | 5 |
| -rw-r--r-- | mlir/lib/Transforms/Utils/Utils.cpp | 13 |
| -rw-r--r-- | mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp | 2 |
| -rw-r--r-- | mlir/lib/Transforms/Vectorize.cpp | 12 |
14 files changed, 50 insertions, 55 deletions
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 937399c..00ae92b 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -479,7 +479,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
   // Get to the first load, store, or for op.
   auto curBegin =
       std::find_if(block->begin(), block->end(), [&](Operation &op) {
-        return op.isa<LoadOp>() || op.isa<StoreOp>() || op.isa<AffineForOp>();
+        return isa<LoadOp>(op) || isa<StoreOp>(op) || isa<AffineForOp>(op);
       });
 
   for (auto it = curBegin; it != block->end(); ++it) {
@@ -522,7 +522,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
         runOnBlock(/*begin=*/it, /*end=*/std::next(it));
         curBegin = std::next(it);
       }
-    } else if (!it->isa<LoadOp>() && !it->isa<StoreOp>()) {
+    } else if (!isa<LoadOp>(&*it) && !isa<StoreOp>(&*it)) {
       runOnBlock(/*begin=*/curBegin, /*end=*/it);
       curBegin = std::next(it);
     }
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index d430c5d..4e9e48c 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -127,13 +127,13 @@ struct LoopNestStateCollector {
 
   void collect(Operation *opToWalk) {
     opToWalk->walk([&](Operation *op) {
-      if (op->isa<AffineForOp>())
+      if (isa<AffineForOp>(op))
         forOps.push_back(cast<AffineForOp>(op));
       else if (op->getNumRegions() != 0)
         hasNonForRegion = true;
-      else if (op->isa<LoadOp>())
+      else if (isa<LoadOp>(op))
        loadOpInsts.push_back(op);
-      else if (op->isa<StoreOp>())
+      else if (isa<StoreOp>(op))
        storeOpInsts.push_back(op);
     });
   }
@@ -141,8 +141,8 @@ struct LoopNestStateCollector {
 
 // TODO(b/117228571) Replace when this is modeled through side-effects/op traits
 static bool isMemRefDereferencingOp(Operation &op) {
-  if (op.isa<LoadOp>() || op.isa<StoreOp>() || op.isa<DmaStartOp>() ||
-      op.isa<DmaWaitOp>())
+  if (isa<LoadOp>(op) || isa<StoreOp>(op) || isa<DmaStartOp>(op) ||
+      isa<DmaWaitOp>(op))
     return true;
   return false;
 }
@@ -604,7 +604,7 @@ public:
         continue;
       assert(nodes.count(edge.id) > 0);
       // Skip if 'edge.id' is not a loop nest.
-      if (!getNode(edge.id)->op->isa<AffineForOp>())
+      if (!isa<AffineForOp>(getNode(edge.id)->op))
         continue;
       // Visit current input edge 'edge'.
       callback(edge);
@@ -756,7 +756,7 @@ struct LoopNestStatsCollector {
     auto *forInst = forOp.getOperation();
     auto *parentInst = forOp.getOperation()->getParentOp();
     if (parentInst != nullptr) {
-      assert(parentInst->isa<AffineForOp>() && "Expected parent AffineForOp");
+      assert(isa<AffineForOp>(parentInst) && "Expected parent AffineForOp");
       // Add mapping to 'forOp' from its parent AffineForOp.
       stats->loopMap[parentInst].push_back(forOp);
     }
@@ -765,7 +765,7 @@ struct LoopNestStatsCollector {
     unsigned count = 0;
     stats->opCountMap[forInst] = 0;
     for (auto &op : *forOp.getBody()) {
-      if (!op.isa<AffineForOp>() && !op.isa<AffineIfOp>())
+      if (!isa<AffineForOp>(op) && !isa<AffineIfOp>(op))
         ++count;
     }
     stats->opCountMap[forInst] = count;
@@ -1049,7 +1049,7 @@ computeLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
 // This can increase the loop depth at which we can fuse a slice, since we are
 // pushing loop carried dependence to a greater depth in the loop nest.
 static void sinkSequentialLoops(MemRefDependenceGraph::Node *node) {
-  assert(node->op->isa<AffineForOp>());
+  assert(isa<AffineForOp>(node->op));
   SmallVector<AffineForOp, 4> loops;
   AffineForOp curr = cast<AffineForOp>(node->op);
   getPerfectlyNestedLoops(loops, curr);
@@ -1829,7 +1829,7 @@ public:
       // Get 'dstNode' into which to attempt fusion.
       auto *dstNode = mdg->getNode(dstId);
       // Skip if 'dstNode' is not a loop nest.
-      if (!dstNode->op->isa<AffineForOp>())
+      if (!isa<AffineForOp>(dstNode->op))
        continue;
       // Sink sequential loops in 'dstNode' (and thus raise parallel loops)
       // while preserving relative order. This can increase the maximum loop
@@ -1867,7 +1867,7 @@ public:
       // Get 'srcNode' from which to attempt fusion into 'dstNode'.
       auto *srcNode = mdg->getNode(srcId);
       // Skip if 'srcNode' is not a loop nest.
-      if (!srcNode->op->isa<AffineForOp>())
+      if (!isa<AffineForOp>(srcNode->op))
        continue;
       // Skip if 'srcNode' has more than one store to any memref.
       // TODO(andydavis) Support fusing multi-output src loop nests.
@@ -2012,7 +2012,7 @@ public:
       // Get 'dstNode' into which to attempt fusion.
       auto *dstNode = mdg->getNode(dstId);
       // Skip if 'dstNode' is not a loop nest.
-      if (!dstNode->op->isa<AffineForOp>())
+      if (!isa<AffineForOp>(dstNode->op))
        continue;
       // Attempt to fuse 'dstNode' with its sibling nodes in the graph.
       fuseWithSiblingNodes(dstNode);
@@ -2180,7 +2180,7 @@ public:
       if (outEdge.id == dstNode->id || outEdge.value != inEdge.value)
         return;
       auto *sibNode = mdg->getNode(sibNodeId);
-      if (!sibNode->op->isa<AffineForOp>())
+      if (!isa<AffineForOp>(sibNode->op))
        return;
       // Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
       if (canFuseWithSibNode(sibNode, outEdge.value)) {
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 2f95db9..402f7d9 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -82,7 +82,7 @@ void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
 
   for (auto &op : *loopBody) {
     // If the operation is loop invariant, insert it into opsToMove.
-    if (!op.isa<AffineForOp>() && !op.isa<AffineTerminatorOp>() &&
+    if (!isa<AffineForOp>(op) && !isa<AffineTerminatorOp>(op) &&
         loopDefinedOps.count(&op) != 1) {
       LLVM_DEBUG(op.print(llvm::dbgs() << "\nLICM'ing op\n"));
       opsToMove.push_back(&op);
@@ -99,7 +99,7 @@ void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
 
   // If the for loop body has a single operation (the terminator), erase it.
   if (forOp.getBody()->getOperations().size() == 1) {
-    assert(forOp.getBody()->getOperations().front().isa<AffineTerminatorOp>());
+    assert(isa<AffineTerminatorOp>(forOp.getBody()->front()));
     forOp.erase();
   }
 }
diff --git a/mlir/lib/Transforms/LoopUnroll.cpp b/mlir/lib/Transforms/LoopUnroll.cpp
index 1707f78..0595392 100644
--- a/mlir/lib/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Transforms/LoopUnroll.cpp
@@ -111,7 +111,7 @@ void LoopUnroll::runOnFunction() {
       for (auto &region : opInst->getRegions())
         for (auto &block : region)
           hasInnerLoops |= walkPostOrder(block.begin(), block.end());
-      if (opInst->isa<AffineForOp>()) {
+      if (isa<AffineForOp>(opInst)) {
        if (!hasInnerLoops)
          loops.push_back(cast<AffineForOp>(opInst));
        return true;
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 43e8f4a..609b424 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -139,12 +139,12 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
   void walk(Block &block) {
     for (auto it = block.begin(), e = std::prev(block.end()); it != e;) {
       auto subBlockStart = it;
-      while (it != e && !it->isa<AffineForOp>())
+      while (it != e && !isa<AffineForOp>(&*it))
        ++it;
       if (it != subBlockStart)
         subBlocks.push_back({subBlockStart, std::prev(it)});
       // Process all for insts that appear next.
-      while (it != e && it->isa<AffineForOp>())
+      while (it != e && isa<AffineForOp>(&*it))
        walk(&*it++);
     }
   }
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 6f0162e..7f52e85 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -612,8 +612,7 @@ void LowerAffinePass::runOnFunction() {
   // Collect all the For operations as well as AffineIfOps and AffineApplyOps.
   // We do this as a prepass to avoid invalidating the walker with our rewrite.
   getFunction().walk([&](Operation *op) {
-    if (op->isa<AffineApplyOp>() || op->isa<AffineForOp>() ||
-        op->isa<AffineIfOp>())
+    if (isa<AffineApplyOp>(op) || isa<AffineForOp>(op) || isa<AffineIfOp>(op))
       instsToRewrite.push_back(op);
   });
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 206ae53b..f81fabb 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -256,7 +256,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
   auto it = substitutionsMap->find(v);
   if (it == substitutionsMap->end()) {
     auto *opInst = v->getDefiningOp();
-    if (opInst->isa<ConstantOp>()) {
+    if (isa<ConstantOp>(opInst)) {
       FuncBuilder b(opInst);
       auto *op = instantiate(&b, opInst, hwVectorType, substitutionsMap);
       auto res = substitutionsMap->insert(std::make_pair(v, op->getResult(0)));
@@ -407,9 +407,9 @@ materializeAttributes(Operation *opInst, VectorType hwVectorType) {
 static Operation *instantiate(FuncBuilder *b, Operation *opInst,
                               VectorType hwVectorType,
                               DenseMap<Value *, Value *> *substitutionsMap) {
-  assert(!opInst->isa<VectorTransferReadOp>() &&
+  assert(!isa<VectorTransferReadOp>(opInst) &&
          "Should call the function specialized for VectorTransferReadOp");
-  assert(!opInst->isa<VectorTransferWriteOp>() &&
+  assert(!isa<VectorTransferWriteOp>(opInst) &&
          "Should call the function specialized for VectorTransferWriteOp");
   if (opInst->getNumRegions() != 0)
     return nullptr;
@@ -550,7 +550,7 @@ static bool instantiateMaterialization(Operation *op,
   FuncBuilder b(op);
   // AffineApplyOp are ignored: instantiating the proper vector op will take
   // care of AffineApplyOps by composing them properly.
-  if (op->isa<AffineApplyOp>()) {
+  if (isa<AffineApplyOp>(op)) {
     return false;
   }
   if (op->getNumRegions() != 0)
@@ -749,7 +749,7 @@ void MaterializeVectorsPass::runOnFunction() {
   // Capture terminators; i.e. vector.transfer_write ops involving a strict
   // super-vector of subVectorType.
   auto filter = [subVectorType](Operation &op) {
-    if (!op.isa<VectorTransferWriteOp>()) {
+    if (!isa<VectorTransferWriteOp>(op)) {
       return false;
     }
     return matcher::operatesOnSuperVectorsOf(op, subVectorType);
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 118efe5..fcbaeab 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -237,15 +237,15 @@ void MemRefDataFlowOpt::runOnFunction() {
   for (auto *memref : memrefsToErase) {
     // If the memref hasn't been alloc'ed in this function, skip.
     Operation *defInst = memref->getDefiningOp();
-    if (!defInst || !defInst->isa<AllocOp>())
+    if (!defInst || !isa<AllocOp>(defInst))
       // TODO(mlir-team): if the memref was returned by a 'call' operation, we
       // could still erase it if the call had no side-effects.
       continue;
 
     if (std::any_of(memref->use_begin(), memref->use_end(),
                     [&](OpOperand &use) {
                       auto *ownerInst = use.getOwner();
-                      return (!ownerInst->isa<StoreOp>() &&
-                              !ownerInst->isa<DeallocOp>());
+                      return (!isa<StoreOp>(ownerInst) &&
+                              !isa<DeallocOp>(ownerInst));
                     }))
       continue;
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 272972d..0d4b201 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -57,8 +57,8 @@ FunctionPassBase *mlir::createPipelineDataTransferPass() {
 // Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's are
 // added.  TODO(b/117228571)
 static unsigned getTagMemRefPos(Operation &dmaInst) {
-  assert(dmaInst.isa<DmaStartOp>() || dmaInst.isa<DmaWaitOp>());
-  if (dmaInst.isa<DmaStartOp>()) {
+  assert(isa<DmaStartOp>(dmaInst) || isa<DmaWaitOp>(dmaInst));
+  if (isa<DmaStartOp>(dmaInst)) {
     // Second to last operand.
     return dmaInst.getNumOperands() - 2;
   }
@@ -189,7 +189,7 @@ static void findMatchingStartFinishInsts(
   SmallVector<Operation *, 4> dmaStartInsts, dmaFinishInsts;
   for (auto &op : *forOp.getBody()) {
     // Collect DMA finish operations.
-    if (op.isa<DmaWaitOp>()) {
+    if (isa<DmaWaitOp>(op)) {
       dmaFinishInsts.push_back(&op);
       continue;
     }
@@ -218,7 +218,7 @@ static void findMatchingStartFinishInsts(
     bool escapingUses = false;
     for (const auto &use : memref->getUses()) {
       // We can double buffer regardless of dealloc's outside the loop.
-      if (use.getOwner()->isa<DeallocOp>())
+      if (isa<DeallocOp>(use.getOwner()))
        continue;
       if (!forOp.getBody()->findAncestorInstInBlock(*use.getOwner())) {
         LLVM_DEBUG(llvm::dbgs()
@@ -293,7 +293,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
       allocInst->erase();
     } else if (oldMemRef->hasOneUse()) {
       auto *singleUse = oldMemRef->use_begin()->getOwner();
-      if (singleUse->isa<DeallocOp>()) {
+      if (isa<DeallocOp>(singleUse)) {
        singleUse->erase();
        oldMemRef->getDefiningOp()->erase();
      }
@@ -325,7 +325,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
   DenseMap<Operation *, unsigned> instShiftMap;
   for (auto &pair : startWaitPairs) {
     auto *dmaStartInst = pair.first;
-    assert(dmaStartInst->isa<DmaStartOp>());
+    assert(isa<DmaStartOp>(dmaStartInst));
     instShiftMap[dmaStartInst] = 0;
     // Set shifts for DMA start op's affine operand computation slices to 0.
     SmallVector<AffineApplyOp, 4> sliceOps;
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index 7fe62a2..fbdee58 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -173,7 +173,7 @@ bool GreedyPatternRewriteDriver::simplifyFunction(int maxIterations) {
       if (op->hasNoSideEffect() && op->use_empty()) {
         // Be careful to update bookkeeping in ConstantHelper to keep
         // consistency if this is a constant op.
-        if (op->isa<ConstantOp>())
+        if (isa<ConstantOp>(op))
          helper.notifyRemoval(op);
         op->erase();
         continue;
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 1ae75b4..d0d564a 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -209,7 +209,7 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
         operandMap.map(srcIV, loopChunkIV);
       }
       for (auto *op : insts) {
-        if (!op->isa<AffineTerminatorOp>())
+        if (!isa<AffineTerminatorOp>(op))
          bodyBuilder.clone(*op, operandMap);
       }
     };
@@ -511,7 +511,6 @@ void mlir::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
 /// deeper in the loop nest.
 void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
   for (unsigned i = 0; i < loopDepth; ++i) {
-    assert(forOp.getBody()->front().isa<AffineForOp>());
     AffineForOp nextForOp = cast<AffineForOp>(forOp.getBody()->front());
     interchangeLoops(forOp, nextForOp);
   }
@@ -551,7 +550,7 @@ static void cloneLoopBodyInto(AffineForOp forOp, Value *oldIv,
     if (&op == newForOp.getOperation()) {
       continue;
     }
-    if (op.isa<AffineTerminatorOp>()) {
+    if (isa<AffineTerminatorOp>(op)) {
       continue;
     }
     auto *instClone = b.clone(op, map);
diff --git a/mlir/lib/Transforms/Utils/Utils.cpp b/mlir/lib/Transforms/Utils/Utils.cpp
index 1ab821a..00ee955 100644
--- a/mlir/lib/Transforms/Utils/Utils.cpp
+++ b/mlir/lib/Transforms/Utils/Utils.cpp
@@ -38,8 +38,8 @@ using namespace mlir;
 // Temporary utility: will be replaced when this is modeled through
 // side-effects/op traits. TODO(b/117228571)
 static bool isMemRefDereferencingOp(Operation &op) {
-  if (op.isa<LoadOp>() || op.isa<StoreOp>() || op.isa<DmaStartOp>() ||
-      op.isa<DmaWaitOp>())
+  if (isa<LoadOp>(op) || isa<StoreOp>(op) || isa<DmaStartOp>(op) ||
+      isa<DmaWaitOp>(op))
     return true;
   return false;
 }
@@ -93,7 +93,7 @@ bool mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
 
     // Skip dealloc's - no replacement is necessary, and a replacement doesn't
     // hurt dealloc's.
-    if (opInst->isa<DeallocOp>())
+    if (isa<DeallocOp>(opInst))
       continue;
 
     // Check if the memref was used in a non-deferencing context. It is fine for
@@ -225,12 +225,9 @@ void mlir::createAffineComputationSlice(
   // Collect all operands that are results of affine apply ops.
   SmallVector<Value *, 4> subOperands;
   subOperands.reserve(opInst->getNumOperands());
-  for (auto *operand : opInst->getOperands()) {
-    auto *defInst = operand->getDefiningOp();
-    if (defInst && defInst->isa<AffineApplyOp>()) {
+  for (auto *operand : opInst->getOperands())
+    if (isa_and_nonnull<AffineApplyOp>(operand->getDefiningOp()))
       subOperands.push_back(operand);
-    }
-  }
 
   // Gather sequence of AffineApplyOps reachable from 'subOperands'.
   SmallVector<Operation *, 4> affineApplyOps;
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 20138d5..7b4db1f 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -231,7 +231,7 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
   simplifyAffineMap(res).print(outs << "\nComposed map: ");
 }
 
-static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
+static bool affineApplyOp(Operation &op) { return isa<AffineApplyOp>(op); }
 
 static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
   auto app = dyn_cast<AffineApplyOp>(op);
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 4a58b15..a5bb23f 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -741,14 +741,14 @@ void VectorizationState::registerReplacement(Operation *key, Operation *value) {
   vectorizedSet.insert(value);
   vectorizationMap.insert(std::make_pair(key, value));
   registerReplacement(key->getResult(0), value->getResult(0));
-  if (key->isa<LoadOp>()) {
+  if (isa<LoadOp>(key)) {
     assert(roots.count(key) == 0 && "root was already inserted previously");
     roots.insert(key);
   }
 }
 
 void VectorizationState::registerTerminal(Operation *op) {
-  assert(op->isa<StoreOp>() && "terminal must be a StoreOp");
+  assert(isa<StoreOp>(op) && "terminal must be a StoreOp");
   assert(terminals.count(op) == 0 &&
          "terminal was already inserted previously");
   terminals.insert(op);
@@ -800,7 +800,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
   // identity subset of AffineMap and do not change layout.
   // TODO(ntv): increase the expressiveness power of vector.transfer operations
   // as needed by various targets.
-  if (opInst->template isa<LoadOp>()) {
+  if (isa<LoadOp>(opInst)) {
     auto permutationMap =
         makePermutationMap(opInst, state->strategy->loopToVectorDim);
     if (!permutationMap)
@@ -1005,11 +1005,11 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
 static Operation *vectorizeOneOperation(Operation *opInst,
                                         VectorizationState *state) {
   // Sanity checks.
-  assert(!opInst->isa<LoadOp>() &&
+  assert(!isa<LoadOp>(opInst) &&
          "all loads must have already been fully vectorized independently");
-  assert(!opInst->isa<VectorTransferReadOp>() &&
+  assert(!isa<VectorTransferReadOp>(opInst) &&
         "vector.transfer_read cannot be further vectorized");
-  assert(!opInst->isa<VectorTransferWriteOp>() &&
+  assert(!isa<VectorTransferWriteOp>(opInst) &&
        "vector.transfer_write cannot be further vectorized");
   if (auto store = dyn_cast<StoreOp>(opInst)) {