author    Maksim Levental <maksim.levental@gmail.com>  2025-07-24 15:34:56 -0500
committer GitHub <noreply@github.com>                  2025-07-24 15:34:56 -0500
commit    2f5312563fd5cb2e355ec49109f3e63875337c7c (patch)
tree      1bb0c9722e8756658bc571a03ed7d2a2c20859db
parent    1031f14e921ad54ca17333f59cb19a981f864d95 (diff)
[mlir][NFC] update `mlir/Dialect` create APIs (15/n) (#149921)
See https://github.com/llvm/llvm-project/pull/147168 for more info.
23 files changed, 553 insertions, 542 deletions
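The mechanics are uniform across every hunk below: the op class becomes the
subject of the call and the builder moves into the argument list. A minimal
sketch of the before/after call shape (the surrounding variables are
illustrative, not taken from any one hunk):

    // Old builder-centric form, removed by this series:
    //   auto c0 = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    // New op-centric form, where the builder is the first argument:
    auto c0 = arith::ConstantIndexOp::create(rewriter, loc, 0);

    // With an ImplicitLocOpBuilder `b`, which carries its own Location,
    // the `loc` argument is omitted in both the old and new forms:
    //   Value c1 = b.create<arith::ConstantIndexOp>(1);
    Value c1 = arith::ConstantIndexOp::create(b, 1);

Both spellings build the same operation, which is why the change is NFC.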
diff --git a/mlir/lib/Dialect/ArmSME/IR/Utils.cpp b/mlir/lib/Dialect/ArmSME/IR/Utils.cpp
index 5f00cef..e5e1312 100644
--- a/mlir/lib/Dialect/ArmSME/IR/Utils.cpp
+++ b/mlir/lib/Dialect/ArmSME/IR/Utils.cpp
@@ -75,21 +75,21 @@ scf::ForOp createLoopOverTileSlices(
     PatternRewriter &rewriter, Location loc, Value initTile,
     std::function<Value(OpBuilder &, Location, Value, Value)> makeLoopBody) {
   OpBuilder::InsertionGuard g(rewriter);
-  auto step = rewriter.create<arith::ConstantIndexOp>(loc, 1);
-  auto minTileSlices = rewriter.create<arith::ConstantIndexOp>(
-      loc, llvm::cast<VectorType>(initTile.getType()).getDimSize(0));
+  auto step = arith::ConstantIndexOp::create(rewriter, loc, 1);
+  auto minTileSlices = arith::ConstantIndexOp::create(
+      rewriter, loc, llvm::cast<VectorType>(initTile.getType()).getDimSize(0));
   auto vscale =
-      rewriter.create<vector::VectorScaleOp>(loc, rewriter.getIndexType());
-  auto lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+      vector::VectorScaleOp::create(rewriter, loc, rewriter.getIndexType());
+  auto lowerBound = arith::ConstantIndexOp::create(rewriter, loc, 0);
   auto numTileSlices =
-      rewriter.create<arith::MulIOp>(loc, minTileSlices, vscale);
-  auto forOp = rewriter.create<scf::ForOp>(loc, lowerBound, numTileSlices, step,
-                                           ValueRange{initTile});
+      arith::MulIOp::create(rewriter, loc, minTileSlices, vscale);
+  auto forOp = scf::ForOp::create(rewriter, loc, lowerBound, numTileSlices,
+                                  step, ValueRange{initTile});
   rewriter.setInsertionPointToStart(forOp.getBody());
   Value nextTile = makeLoopBody(rewriter, loc,
                                 /*tileSliceIndex=*/forOp.getInductionVar(),
                                 /*currentTile=*/forOp.getRegionIterArg(0));
-  rewriter.create<scf::YieldOp>(loc, nextTile);
+  scf::YieldOp::create(rewriter, loc, nextTile);
   return forOp;
 }
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp b/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp
index 23f2c2b..9bf0265 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/OuterProductFusion.cpp
@@ -136,7 +136,7 @@ public:
     auto loc = op.getLoc();

     auto packInputs = [&](Value lhs, Value rhs) {
-      return rewriter.create<vector::InterleaveOp>(loc, lhs, rhs);
+      return vector::InterleaveOp::create(rewriter, loc, lhs, rhs);
     };

     auto lhs = packInputs(op1.getLhs().getDefiningOp()->getOperand(0),
@@ -284,7 +284,7 @@ public:
     auto loc = op.getLoc();

     auto packInputs = [&](Value lhs, Value rhs) {
-      return rewriter.create<vector::InterleaveOp>(loc, lhs, rhs);
+      return vector::InterleaveOp::create(rewriter, loc, lhs, rhs);
     };

     auto lhs0 = packInputs(op1.getLhs().getDefiningOp()->getOperand(0),
@@ -456,8 +456,8 @@ struct SwapVectorExtractOfArithExtend
     Value extendSource = extendOp->getOperand(0);

     // Create new extract from source of extend.
-    Value newExtract = rewriter.create<vector::ExtractOp>(
-        loc, extendSource, extractOp.getMixedPosition());
+    Value newExtract = vector::ExtractOp::create(rewriter, loc, extendSource,
+                                                 extractOp.getMixedPosition());

     // Extend new extract to original result type.
     Operation *newExtend =
@@ -503,8 +503,9 @@ struct SwapVectorScalableExtractOfArithExtend
     // Create new extract from source of extend.
     VectorType extractResultVectorType =
         resultType.clone(extendSourceVectorType.getElementType());
-    Value newExtract = rewriter.create<vector::ScalableExtractOp>(
-        loc, extractResultVectorType, extendSource, extractOp.getPos());
+    Value newExtract = vector::ScalableExtractOp::create(
+        rewriter, loc, extractResultVectorType, extendSource,
+        extractOp.getPos());

     // Extend new extract to original result type.
     Operation *newExtend =
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp b/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp
index b3c988d..d925c19 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/TileAllocation.cpp
@@ -210,7 +210,7 @@ void splitCondBranches(IRRewriter &rewriter, FunctionOpInterface function) {
   auto insertJump = [&](Location loc, Block *source, Block *dest, auto args) {
     rewriter.setInsertionPointToEnd(source);
-    rewriter.create<cf::BranchOp>(loc, dest, args);
+    cf::BranchOp::create(rewriter, loc, dest, args);
   };

   for (auto condBranch : worklist) {
@@ -253,7 +253,7 @@ void insertCopiesAtBranches(IRRewriter &rewriter,
     for (OpOperand &operand : terminator->getOpOperands()) {
       if (isValidSMETileVectorType(operand.get().getType())) {
         auto copy =
-            rewriter.create<CopyTileOp>(terminator->getLoc(), operand.get());
+            CopyTileOp::create(rewriter, terminator->getLoc(), operand.get());
         rewriter.modifyOpInPlace(terminator, [&] { operand.assign(copy); });
       }
     }
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
index 1e8e126..1c0eced 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
@@ -82,13 +82,14 @@ SmallVector<Value, 2> addConstantScalableOffset(OpBuilder &builder,
                                                 Location loc,
                                                 ValueRange indices,
                                                 ArrayRef<int> scalableOffsets) {
-  auto vscale = builder.create<vector::VectorScaleOp>(loc);
+  auto vscale = vector::VectorScaleOp::create(builder, loc);
   return llvm::map_to_vector(
       llvm::zip_equal(indices, scalableOffsets), [&](auto pair) -> Value {
         auto [index, base] = pair;
-        auto offset = builder.create<arith::MulIOp>(
-            loc, builder.create<arith::ConstantIndexOp>(loc, base), vscale);
-        return builder.create<arith::AddIOp>(loc, index, offset);
+        auto offset = arith::MulIOp::create(
+            builder, loc, arith::ConstantIndexOp::create(builder, loc, base),
+            vscale);
+        return arith::AddIOp::create(builder, loc, index, offset);
       });
 }

@@ -132,8 +133,8 @@ Value extractSMEMask(OpBuilder &builder, Location loc, Value mask,
   // from the mask operands to get the parameters for this sub-tile.
   auto smeTileMaskDims = addConstantScalableOffset(
       builder, loc, createMask.getOperands(), {-smeTile.row, -smeTile.col});
-  auto smeTileCreateMask = builder.create<vector::CreateMaskOp>(
-      loc, smeTile.type.clone(builder.getI1Type()), smeTileMaskDims);
+  auto smeTileCreateMask = vector::CreateMaskOp::create(
+      builder, loc, smeTile.type.clone(builder.getI1Type()), smeTileMaskDims);
   return smeTileCreateMask.getResult();
 }

@@ -190,8 +191,8 @@ struct LegalizeArithConstantOpsByDecomposition
     auto smeTileType = getSMETileTypeForElement(vectorType.getElementType());
     auto tileCount = getNumberOfSMETilesForVectorType(vectorType);

-    auto tileSplat = rewriter.create<arith::ConstantOp>(
-        constantOp.getLoc(), denseAttr.resizeSplat(smeTileType));
+    auto tileSplat = arith::ConstantOp::create(
+        rewriter, constantOp.getLoc(), denseAttr.resizeSplat(smeTileType));
     SmallVector<Value> repl(tileCount, tileSplat);
     rewriter.replaceOpWithMultiple(constantOp, {repl});

@@ -237,12 +238,12 @@ struct LegalizeVectorOuterProductOpsByDecomposition
              decomposeToSMETiles(rewriter, vectorType, smeTileType))) {

       auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
-      auto lhs = rewriter.create<vector::ScalableExtractOp>(
-          loc, sliceType, outerProductOp.getLhs(), smeTile.row);
-      auto rhs = rewriter.create<vector::ScalableExtractOp>(
-          loc, sliceType, outerProductOp.getRhs(), smeTile.col);
-      auto smeOuterProduct = rewriter.create<vector::OuterProductOp>(
-          loc, smeTileType, lhs, rhs,
+      auto lhs = vector::ScalableExtractOp::create(
+          rewriter, loc, sliceType, outerProductOp.getLhs(), smeTile.row);
+      auto rhs = vector::ScalableExtractOp::create(
+          rewriter, loc, sliceType, outerProductOp.getRhs(), smeTile.col);
+      auto smeOuterProduct = vector::OuterProductOp::create(
+          rewriter, loc, smeTileType, lhs, rhs,
           !accSMETiles.empty() ? accSMETiles[index] : Value{},
           outerProductOp.getKind());

@@ -314,8 +315,8 @@ struct LegalizeTransferReadOpsByDecomposition
     for (SMESubTile smeTile :
          decomposeToSMETiles(rewriter, vectorType, smeTileType, transposed)) {
       auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
-      auto smeRead = rewriter.create<vector::TransferReadOp>(
-          loc, smeTileType, readOp.getBase(),
+      auto smeRead = vector::TransferReadOp::create(
+          rewriter, loc, smeTileType, readOp.getBase(),
           getSMESubTileIndices(rewriter, loc, readOp.getIndices(), smeTile),
           readOp.getPermutationMapAttr(), readOp.getPadding(), smeMask,
           readOp.getInBoundsAttr());
@@ -363,8 +364,8 @@ struct LegalizeTransferWriteOpsByDecomposition
     for (auto [index, smeTile] : llvm::enumerate(decomposeToSMETiles(
              rewriter, vectorType, smeTileType, transposed))) {
       auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
-      auto smeWrite = rewriter.create<vector::TransferWriteOp>(
-          loc, inputSMETiles[index], destTensorOrMemref,
+      auto smeWrite = vector::TransferWriteOp::create(
+          rewriter, loc, inputSMETiles[index], destTensorOrMemref,
           getSMESubTileIndices(rewriter, loc, writeOp.getIndices(), smeTile),
           writeOp.getPermutationMapAttr(), smeMask, writeOp.getInBoundsAttr());
       if (writeOp.hasPureTensorSemantics())
@@ -456,11 +457,11 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop
         VectorType::get(minTileSlices, rewriter.getI1Type(), true);

     // Create loop over all tile slices.
-    auto lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+    auto lowerBound = arith::ConstantIndexOp::create(rewriter, loc, 0);
     auto upperBound = createVscaleMultiple(minTileSlices);
-    auto step = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+    auto step = arith::ConstantIndexOp::create(rewriter, loc, 1);
     auto storeLoop =
-        rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);
+        scf::ForOp::create(rewriter, loc, lowerBound, upperBound, step);
     rewriter.setInsertionPointToStart(storeLoop.getBody());

     // For each sub-tile of the multi-tile `vectorType`.
@@ -474,30 +475,31 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop
       // The current slice of `vectorType` we are processing.
       auto sliceIndex =
-          rewriter.create<arith::AddIOp>(loc, tileRow, tileSliceIndex);
+          arith::AddIOp::create(rewriter, loc, tileRow, tileSliceIndex);

       // Where in the destination memref the current slice will be stored.
-      auto storeRow = rewriter.create<arith::AddIOp>(loc, sliceIndex,
-                                                     writeOp.getIndices()[0]);
-      auto storeCol =
-          rewriter.create<arith::AddIOp>(loc, tileCol, writeOp.getIndices()[1]);
+      auto storeRow = arith::AddIOp::create(rewriter, loc, sliceIndex,
+                                            writeOp.getIndices()[0]);
+      auto storeCol = arith::AddIOp::create(rewriter, loc, tileCol,
+                                            writeOp.getIndices()[1]);

       // Extract the mask for the current slice.
       Value sliceMask = nullptr;
       if (mask) {
-        sliceMask = rewriter.create<vector::ExtractOp>(
-            loc, mask, OpFoldResult(sliceIndex));
+        sliceMask = vector::ExtractOp::create(rewriter, loc, mask,
+                                              OpFoldResult(sliceIndex));
         if (sliceMaskType != sliceMask.getType())
-          sliceMask = rewriter.create<vector::ScalableExtractOp>(
-              loc, sliceMaskType, sliceMask, smeTile.col);
+          sliceMask = vector::ScalableExtractOp::create(
+              rewriter, loc, sliceMaskType, sliceMask, smeTile.col);
       }

       // Extract and store the current slice.
       Value tile = inputSMETiles[index];
       auto slice =
-          rewriter.create<vector::ExtractOp>(loc, tile, tileSliceIndex);
-      rewriter.create<vector::TransferWriteOp>(
-          loc, slice, writeOp.getBase(), ValueRange{storeRow, storeCol},
+          vector::ExtractOp::create(rewriter, loc, tile, tileSliceIndex);
+      vector::TransferWriteOp::create(
+          rewriter, loc, slice, writeOp.getBase(),
+          ValueRange{storeRow, storeCol},
           AffineMapAttr::get(writeOp.getPermutationMap().dropResult(0)),
           sliceMask,
           rewriter.getBoolArrayAttr(
@@ -567,14 +569,15 @@ struct FoldExtractFromVectorOfSMELikeCreateMasks
           extractOp,
           "constant vector.create_masks dims should be folded elsewhere");

-    auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+    auto zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
     auto extractionIndex = getValueOrCreateConstantIndexOp(
         rewriter, loc, extractOp.getMixedPosition()[0]);
-    auto extractionInTrueRegion = rewriter.create<arith::CmpIOp>(
-        loc, rewriter.getI1Type(), arith::CmpIPredicate::slt, extractionIndex,
-        frontMaskDim);
-    auto newMaskFrontDim = rewriter.create<arith::SelectOp>(
-        loc, extractionInTrueRegion, createMaskOp.getOperand(1), zero);
+    auto extractionInTrueRegion = arith::CmpIOp::create(
+        rewriter, loc, rewriter.getI1Type(), arith::CmpIPredicate::slt,
+        extractionIndex, frontMaskDim);
+    auto newMaskFrontDim =
+        arith::SelectOp::create(rewriter, loc, extractionInTrueRegion,
+                                createMaskOp.getOperand(1), zero);

     rewriter.replaceOpWithNewOp<vector::CreateMaskOp>(
         extractOp, extractedMaskType,
@@ -660,8 +663,8 @@ struct LiftIllegalVectorTransposeToMemory
           illegalRead, "expected read to have identity permutation map");

     auto loc = transposeOp.getLoc();
-    auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
-    auto one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
+    auto zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
+    auto one = arith::ConstantIndexOp::create(rewriter, loc, 1);

     // Create a subview that matches the size of the illegal read vector type.
     auto readType = illegalRead.getVectorType();
@@ -669,16 +672,16 @@ struct LiftIllegalVectorTransposeToMemory
         llvm::zip_equal(readType.getShape(), readType.getScalableDims()),
         [&](auto dim) -> Value {
           auto [size, isScalable] = dim;
-          auto dimSize = rewriter.create<arith::ConstantIndexOp>(loc, size);
+          auto dimSize = arith::ConstantIndexOp::create(rewriter, loc, size);
           if (!isScalable)
             return dimSize;
-          auto vscale = rewriter.create<vector::VectorScaleOp>(loc);
-          return rewriter.create<arith::MulIOp>(loc, vscale, dimSize);
+          auto vscale = vector::VectorScaleOp::create(rewriter, loc);
+          return arith::MulIOp::create(rewriter, loc, vscale, dimSize);
         });
     SmallVector<Value> strides(readType.getRank(), Value(one));
-    auto readSubview = rewriter.create<memref::SubViewOp>(
-        loc, illegalRead.getBase(), illegalRead.getIndices(), readSizes,
-        strides);
+    auto readSubview =
+        memref::SubViewOp::create(rewriter, loc, illegalRead.getBase(),
+                                  illegalRead.getIndices(), readSizes, strides);

     // Apply the transpose to all values/attributes of the transfer_read:
     // - The mask
@@ -686,14 +689,14 @@ struct LiftIllegalVectorTransposeToMemory
     if (mask) {
       // Note: The transpose for the mask should fold into the
       // vector.create_mask/constant_mask op, which will then become legal.
-      mask = rewriter.create<vector::TransposeOp>(loc, mask,
-                                                  transposeOp.getPermutation());
+      mask = vector::TransposeOp::create(rewriter, loc, mask,
+                                         transposeOp.getPermutation());
     }
     // - The source memref
     mlir::AffineMap transposeMap = AffineMap::getPermutationMap(
         transposeOp.getPermutation(), getContext());
-    auto transposedSubview = rewriter.create<memref::TransposeOp>(
-        loc, readSubview, AffineMapAttr::get(transposeMap));
+    auto transposedSubview = memref::TransposeOp::create(
+        rewriter, loc, readSubview, AffineMapAttr::get(transposeMap));
     ArrayAttr inBoundsAttr = illegalRead.getInBoundsAttr();
     // - The `in_bounds` attribute
     if (inBoundsAttr) {
@@ -706,8 +709,8 @@ struct LiftIllegalVectorTransposeToMemory
     VectorType legalReadType = resultType.clone(readType.getElementType());
     // Note: The indices are all zero as the subview is already offset.
     SmallVector<Value> readIndices(illegalRead.getIndices().size(), zero);
-    auto legalRead = rewriter.create<vector::TransferReadOp>(
-        loc, legalReadType, transposedSubview, readIndices,
+    auto legalRead = vector::TransferReadOp::create(
+        rewriter, loc, legalReadType, transposedSubview, readIndices,
         illegalRead.getPermutationMapAttr(), illegalRead.getPadding(), mask,
         inBoundsAttr);

@@ -797,12 +800,12 @@ struct LowerIllegalTransposeStoreViaZA
         AffineMap::getPermutationMap(ArrayRef<int64_t>{1, 0}, getContext()));

     // Note: We need to use `get_tile` as there's no vector-level `undef`.
-    Value undefTile = rewriter.create<arm_sme::GetTileOp>(loc, smeTileType);
+    Value undefTile = arm_sme::GetTileOp::create(rewriter, loc, smeTileType);
     Value destTensorOrMemref = writeOp.getBase();
     auto numSlicesPerTile =
         std::min(sourceType.getDimSize(0), smeTileType.getDimSize(0));
     auto numSlices =
-        rewriter.create<arith::ConstantIndexOp>(loc, numSlicesPerTile);
+        arith::ConstantIndexOp::create(rewriter, loc, numSlicesPerTile);
     for (auto [index, smeTile] : llvm::enumerate(
              decomposeToSMETiles(rewriter, sourceType, smeTileType))) {
       // 1. _Deliberately_ drop a scalable dimension and insert a fixed number
@@ -811,47 +814,47 @@ struct LowerIllegalTransposeStoreViaZA
       // rows of the tile after 1*vscale rows.
       Value tile = undefTile;
       for (int d = 0; d < numSlicesPerTile; ++d) {
-        Value vector = rewriter.create<vector::ExtractOp>(
-            loc, transposeOp.getVector(),
-            rewriter.getIndexAttr(d + smeTile.row));
+        Value vector =
+            vector::ExtractOp::create(rewriter, loc, transposeOp.getVector(),
+                                      rewriter.getIndexAttr(d + smeTile.row));
         if (vector.getType() != smeSliceType) {
-          vector = rewriter.create<vector::ScalableExtractOp>(
-              loc, smeSliceType, vector, smeTile.col);
+          vector = vector::ScalableExtractOp::create(
+              rewriter, loc, smeSliceType, vector, smeTile.col);
         }
-        tile = rewriter.create<vector::InsertOp>(loc, vector, tile, d);
+        tile = vector::InsertOp::create(rewriter, loc, vector, tile, d);
       }

       // 2. Transpose the tile position.
       auto transposedRow = createVscaleMultiple(smeTile.col);
       auto transposedCol =
-          rewriter.create<arith::ConstantIndexOp>(loc, smeTile.row);
+          arith::ConstantIndexOp::create(rewriter, loc, smeTile.row);

       // 3. Compute mask for tile store.
       Value maskRows;
       Value maskCols;
       if (auto mask = writeOp.getMask()) {
         auto createMask = mask.getDefiningOp<vector::CreateMaskOp>();
-        maskRows = rewriter.create<arith::SubIOp>(loc, createMask.getOperand(0),
-                                                  transposedRow);
-        maskCols = rewriter.create<arith::SubIOp>(loc, createMask.getOperand(1),
-                                                  transposedCol);
-        maskCols = rewriter.create<index::MinSOp>(loc, maskCols, numSlices);
+        maskRows = arith::SubIOp::create(
+            rewriter, loc, createMask.getOperand(0), transposedRow);
+        maskCols = arith::SubIOp::create(
+            rewriter, loc, createMask.getOperand(1), transposedCol);
+        maskCols = index::MinSOp::create(rewriter, loc, maskCols, numSlices);
       } else {
         maskRows = createVscaleMultiple(smeTileType.getDimSize(0));
         maskCols = numSlices;
       }
-      auto subMask = rewriter.create<vector::CreateMaskOp>(
-          loc, smeTileType.clone(rewriter.getI1Type()),
+      auto subMask = vector::CreateMaskOp::create(
+          rewriter, loc, smeTileType.clone(rewriter.getI1Type()),
           ValueRange{maskRows, maskCols});

       // 4. Emit a transposed tile write.
       auto writeIndices = writeOp.getIndices();
       Value destRow =
-          rewriter.create<arith::AddIOp>(loc, transposedRow, writeIndices[0]);
+          arith::AddIOp::create(rewriter, loc, transposedRow, writeIndices[0]);
       Value destCol =
-          rewriter.create<arith::AddIOp>(loc, transposedCol, writeIndices[1]);
-      auto smeWrite = rewriter.create<vector::TransferWriteOp>(
-          loc, tile, destTensorOrMemref, ValueRange{destRow, destCol},
+          arith::AddIOp::create(rewriter, loc, transposedCol, writeIndices[1]);
+      auto smeWrite = vector::TransferWriteOp::create(
+          rewriter, loc, tile, destTensorOrMemref, ValueRange{destRow, destCol},
          transposeMap, subMask, writeOp.getInBounds());

       if (writeOp.hasPureTensorSemantics())
@@ -934,42 +937,42 @@ struct LowerColumnTransferReadToLoops

     // Create a loop over all rows and load one element at a time.
     auto loc = readOp.getLoc();
-    auto lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
+    auto lowerBound = arith::ConstantIndexOp::create(rewriter, loc, 0);
     auto createVscaleMultiple =
         vector::makeVscaleConstantBuilder(rewriter, loc);
     auto upperBound = createVscaleMultiple(numRows);
-    auto step = rewriter.create<arith::ConstantIndexOp>(loc, 1);
-    Value init = rewriter.create<arith::ConstantOp>(
-        loc, newResType, DenseElementsAttr::get(newResType, 0.0f));
+    auto step = arith::ConstantIndexOp::create(rewriter, loc, 1);
+    Value init = arith::ConstantOp::create(
+        rewriter, loc, newResType, DenseElementsAttr::get(newResType, 0.0f));

     scf::ForOp loadLoop;
     {
       OpBuilder::InsertionGuard g(rewriter);
-      loadLoop = rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step,
-                                             ValueRange{init});
+      loadLoop = scf::ForOp::create(rewriter, loc, lowerBound, upperBound, step,
+                                    ValueRange{init});
       rewriter.setInsertionPointToStart(loadLoop.getBody());

       auto tileSliceIndex = loadLoop.getInductionVar();

-      auto idx0 = rewriter.create<arith::AddIOp>(loc, tileSliceIndex,
-                                                 readOp.getIndices()[0]);
+      auto idx0 = arith::AddIOp::create(rewriter, loc, tileSliceIndex,
+                                        readOp.getIndices()[0]);
       auto idx1 = readOp.getIndices()[1];

-      Value scalar = rewriter.create<memref::LoadOp>(
-          loc, readOp.getBase(), SmallVector<Value>({idx0, idx1}));
+      Value scalar = memref::LoadOp::create(rewriter, loc, readOp.getBase(),
+                                            SmallVector<Value>({idx0, idx1}));

-      Operation *updateInit = rewriter.create<vector::InsertOp>(
-          loc, scalar, loadLoop.getRegionIterArg(0), tileSliceIndex);
+      Operation *updateInit = vector::InsertOp::create(
+          rewriter, loc, scalar, loadLoop.getRegionIterArg(0), tileSliceIndex);

-      rewriter.create<scf::YieldOp>(loc, updateInit->getResult(0));
+      scf::YieldOp::create(rewriter, loc, updateInit->getResult(0));
     }

     // The read operation has been "legalized", but since the original result
     // type was a 2D vector, we need to cast before returning the result. This
     // ShapeCast should cancel-out with some other ShapeCast (i.e. it's a
     // no-op).
-    auto sc = rewriter.create<vector::ShapeCastOp>(
-        loc, readOp.getResult().getType(), loadLoop.getResult(0));
+    auto sc = vector::ShapeCastOp::create(
+        rewriter, loc, readOp.getResult().getType(), loadLoop.getResult(0));

     rewriter.replaceOp(readOp, sc);
diff --git a/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp b/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp
index 7b64e57..a7c6981 100644
--- a/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp
+++ b/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeForLLVMExport.cpp
@@ -87,8 +87,8 @@ struct SvboolConversionOpLowering : public ConvertOpToLLVMPattern<Op> {
     VectorType sourceType = source.getType();
     VectorType resultType = convertOp.getResult().getType();

-    Value result = rewriter.create<arith::ConstantOp>(
-        loc, resultType, rewriter.getZeroAttr(resultType));
+    Value result = arith::ConstantOp::create(rewriter, loc, resultType,
+                                             rewriter.getZeroAttr(resultType));

     // We want to iterate over the input vector in steps of the trailing
     // dimension.
     // So this creates tile shape where all leading dimensions are 1,
@@ -100,15 +100,15 @@ struct SvboolConversionOpLowering : public ConvertOpToLLVMPattern<Op> {
     for (SmallVector<int64_t> index :
          StaticTileOffsetRange(sourceType.getShape(), tileShape)) {
       auto extractOrInsertPosition = ArrayRef(index).drop_back();
-      auto sourceVector = rewriter.create<vector::ExtractOp>(
-          loc, source, extractOrInsertPosition);
+      auto sourceVector = vector::ExtractOp::create(rewriter, loc, source,
+                                                    extractOrInsertPosition);
       VectorType convertedType =
           VectorType::Builder(llvm::cast<VectorType>(sourceVector.getType()))
               .setDim(0, resultType.getShape().back());
       auto convertedVector =
-          rewriter.create<IntrOp>(loc, TypeRange{convertedType}, sourceVector);
-      result = rewriter.create<vector::InsertOp>(loc, convertedVector, result,
-                                                 extractOrInsertPosition);
+          IntrOp::create(rewriter, loc, TypeRange{convertedType}, sourceVector);
+      result = vector::InsertOp::create(rewriter, loc, convertedVector, result,
+                                        extractOrInsertPosition);
     }

     rewriter.replaceOp(convertOp, result);
@@ -135,12 +135,12 @@ struct PselOpLowering : public ConvertOpToLLVMPattern<PselOp> {
                   ConversionPatternRewriter &rewriter) const override {
     auto svboolType = VectorType::get(16, rewriter.getI1Type(), true);
     auto loc = pselOp.getLoc();
-    auto svboolP1 = rewriter.create<ConvertToSvboolIntrOp>(loc, svboolType,
-                                                           adaptor.getP1());
-    auto indexI32 = rewriter.create<arith::IndexCastOp>(
-        loc, rewriter.getI32Type(), pselOp.getIndex());
-    auto pselIntr = rewriter.create<PselIntrOp>(loc, svboolType, svboolP1,
-                                                pselOp.getP2(), indexI32);
+    auto svboolP1 = ConvertToSvboolIntrOp::create(rewriter, loc, svboolType,
+                                                  adaptor.getP1());
+    auto indexI32 = arith::IndexCastOp::create(
+        rewriter, loc, rewriter.getI32Type(), pselOp.getIndex());
+    auto pselIntr = PselIntrOp::create(rewriter, loc, svboolType, svboolP1,
+                                       pselOp.getP2(), indexI32);
     rewriter.replaceOpWithNewOp<ConvertFromSvboolIntrOp>(
         pselOp, adaptor.getP1().getType(), pselIntr);
     return success();
@@ -174,7 +174,7 @@ struct CreateMaskOpLowering
                                          "not SVE predicate-sized");

     auto loc = createMaskOp.getLoc();
-    auto zero = rewriter.create<LLVM::ZeroOp>(loc, rewriter.getI64Type());
+    auto zero = LLVM::ZeroOp::create(rewriter, loc, rewriter.getI64Type());
     rewriter.replaceOpWithNewOp<WhileLTIntrOp>(createMaskOp, maskType, zero,
                                                adaptor.getOperands()[0]);
     return success();
diff --git a/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeVectorStorage.cpp b/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeVectorStorage.cpp
index 3dbb93b..3a409ad 100644
--- a/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeVectorStorage.cpp
+++ b/mlir/lib/Dialect/ArmSVE/Transforms/LegalizeVectorStorage.cpp
@@ -71,8 +71,8 @@ void replaceOpWithUnrealizedConversion(PatternRewriter &rewriter, TOp op,
                                        TLegalizerCallback callback) {
   replaceOpWithLegalizedOp(rewriter, op, [&](TOp newOp) {
     // Mark our `unrealized_conversion_casts` with a pass label.
-    return rewriter.create<UnrealizedConversionCastOp>(
-        op.getLoc(), TypeRange{op.getResult().getType()},
+    return UnrealizedConversionCastOp::create(
+        rewriter, op.getLoc(), TypeRange{op.getResult().getType()},
         ValueRange{callback(newOp)},
         NamedAttribute(rewriter.getStringAttr(kSVELegalizerTag),
                        rewriter.getUnitAttr()));
@@ -239,8 +239,8 @@ struct LegalizeSVEMaskStoreConversion
     auto legalMaskType = widenScalableMaskTypeToSvbool(
         llvm::cast<VectorType>(valueToStore.getType()));
-    auto convertToSvbool = rewriter.create<arm_sve::ConvertToSvboolOp>(
-        loc, legalMaskType, valueToStore);
+    auto convertToSvbool = arm_sve::ConvertToSvboolOp::create(
+        rewriter, loc, legalMaskType, valueToStore);
     // Replace this store with a conversion to a storable svbool mask [1],
     // followed by a wider store.
     replaceOpWithLegalizedOp(rewriter, storeOp,
@@ -290,8 +290,8 @@ struct LegalizeSVEMaskLoadConversion : public OpRewritePattern<memref::LoadOp> {
     replaceOpWithLegalizedOp(rewriter, loadOp, [&](memref::LoadOp newLoadOp) {
       newLoadOp.setMemRef(*legalMemref);
       newLoadOp.getResult().setType(legalMaskType);
-      return rewriter.create<arm_sve::ConvertFromSvboolOp>(
-          loc, loadedMask.getType(), newLoadOp);
+      return arm_sve::ConvertFromSvboolOp::create(
+          rewriter, loc, loadedMask.getType(), newLoadOp);
     });

     return success();
@@ -408,8 +408,8 @@ struct LegalizeTransferRead : public OpRewritePattern<vector::TransferReadOp> {
       reassoc.back().push_back(i);
     if (!memref::CollapseShapeOp::isGuaranteedCollapsible(memTy, reassoc))
       return failure();
-    Value collapsedMem = rewriter.create<memref::CollapseShapeOp>(
-        readOp.getLoc(), readOp.getBase(), reassoc);
+    Value collapsedMem = memref::CollapseShapeOp::create(
+        rewriter, readOp.getLoc(), readOp.getBase(), reassoc);

     // Get a vector type with collapsed trailing dimensions.
     SmallVector<int64_t> shape(origVT.getShape());
@@ -424,14 +424,14 @@ struct LegalizeTransferRead : public OpRewritePattern<vector::TransferReadOp> {
     auto indices = readOp.getIndices().drop_back(numCollapseDims - 1);

     // Create the new `transfer_read`.
-    auto newReadOp = rewriter.create<vector::TransferReadOp>(
-        readOp.getLoc(), collapsedVT, collapsedMem, indices,
+    auto newReadOp = vector::TransferReadOp::create(
+        rewriter, readOp.getLoc(), collapsedVT, collapsedMem, indices,
         readOp.getPadding(),
         ArrayRef<bool>(origInBounds).drop_back(numCollapseDims - 1));

     // Cast back to the original vector type.
-    auto toOrigShape = rewriter.create<vector::ShapeCastOp>(readOp.getLoc(),
-                                                            origVT, newReadOp);
+    auto toOrigShape = vector::ShapeCastOp::create(rewriter, readOp.getLoc(),
+                                                   origVT, newReadOp);

     rewriter.replaceOp(readOp, toOrigShape);
     return success();
diff --git a/mlir/lib/Dialect/Async/IR/Async.cpp b/mlir/lib/Dialect/Async/IR/Async.cpp
index 08a57db..dc7b07d 100644
--- a/mlir/lib/Dialect/Async/IR/Async.cpp
+++ b/mlir/lib/Dialect/Async/IR/Async.cpp
@@ -97,7 +97,7 @@ void ExecuteOp::build(OpBuilder &builder, OperationState &result,
   // expected result is empty. Otherwise, leave this to the caller
   // because we don't know which values to return from the execute op.
   if (resultTypes.empty() && !bodyBuilder) {
-    builder.create<async::YieldOp>(result.location, ValueRange());
+    async::YieldOp::create(builder, result.location, ValueRange());
   } else if (bodyBuilder) {
     bodyBuilder(builder, result.location, bodyBlock->getArguments());
   }
diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
index bf6bfe2a..96283cd 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
@@ -190,8 +190,8 @@ static SmallVector<Value> delinearize(ImplicitLocOpBuilder &b, Value index,
   assert(!tripCounts.empty() && "tripCounts must be not empty");

   for (ssize_t i = tripCounts.size() - 1; i >= 0; --i) {
-    coords[i] = b.create<arith::RemSIOp>(index, tripCounts[i]);
-    index = b.create<arith::DivSIOp>(index, tripCounts[i]);
+    coords[i] = arith::RemSIOp::create(b, index, tripCounts[i]);
+    index = arith::DivSIOp::create(b, index, tripCounts[i]);
   }

   return coords;
@@ -275,15 +275,15 @@ static ParallelComputeFunction createParallelComputeFunction(
   BlockArgument blockSize = args.blockSize();

   // Constants used below.
-  Value c0 = b.create<arith::ConstantIndexOp>(0);
-  Value c1 = b.create<arith::ConstantIndexOp>(1);
+  Value c0 = arith::ConstantIndexOp::create(b, 0);
+  Value c1 = arith::ConstantIndexOp::create(b, 1);

   // Materialize known constants as constant operation in the function body.
   auto values = [&](ArrayRef<BlockArgument> args, ArrayRef<IntegerAttr> attrs) {
     return llvm::to_vector(
         llvm::map_range(llvm::zip(args, attrs), [&](auto tuple) -> Value {
           if (IntegerAttr attr = std::get<1>(tuple))
-            return b.create<arith::ConstantOp>(attr);
+            return arith::ConstantOp::create(b, attr);
           return std::get<0>(tuple);
         }));
   };
@@ -302,17 +302,17 @@ static ParallelComputeFunction createParallelComputeFunction(
   // one-dimensional iteration space.
   Value tripCount = tripCounts[0];
   for (unsigned i = 1; i < tripCounts.size(); ++i)
-    tripCount = b.create<arith::MulIOp>(tripCount, tripCounts[i]);
+    tripCount = arith::MulIOp::create(b, tripCount, tripCounts[i]);

   // Find one-dimensional iteration bounds: [blockFirstIndex, blockLastIndex]:
   //   blockFirstIndex = blockIndex * blockSize
-  Value blockFirstIndex = b.create<arith::MulIOp>(blockIndex, blockSize);
+  Value blockFirstIndex = arith::MulIOp::create(b, blockIndex, blockSize);

   // The last one-dimensional index in the block defined by the `blockIndex`:
   //   blockLastIndex = min(blockFirstIndex + blockSize, tripCount) - 1
-  Value blockEnd0 = b.create<arith::AddIOp>(blockFirstIndex, blockSize);
-  Value blockEnd1 = b.create<arith::MinSIOp>(blockEnd0, tripCount);
-  Value blockLastIndex = b.create<arith::SubIOp>(blockEnd1, c1);
+  Value blockEnd0 = arith::AddIOp::create(b, blockFirstIndex, blockSize);
+  Value blockEnd1 = arith::MinSIOp::create(b, blockEnd0, tripCount);
+  Value blockLastIndex = arith::SubIOp::create(b, blockEnd1, c1);

   // Convert one-dimensional indices to multi-dimensional coordinates.
   auto blockFirstCoord = delinearize(b, blockFirstIndex, tripCounts);
@@ -325,7 +325,7 @@ static ParallelComputeFunction createParallelComputeFunction(
   // dimension when inner compute dimension contains multiple blocks.
   SmallVector<Value> blockEndCoord(op.getNumLoops());
   for (size_t i = 0; i < blockLastCoord.size(); ++i)
-    blockEndCoord[i] = b.create<arith::AddIOp>(blockLastCoord[i], c1);
+    blockEndCoord[i] = arith::AddIOp::create(b, blockLastCoord[i], c1);

   // Construct a loop nest out of scf.for operations that will iterate over
   // all coordinates in [blockFirstCoord, blockLastCoord] range.
@@ -368,21 +368,22 @@ static ParallelComputeFunction createParallelComputeFunction(
       ImplicitLocOpBuilder b(loc, nestedBuilder);

       // Compute induction variable for `loopIdx`.
-      computeBlockInductionVars[loopIdx] = b.create<arith::AddIOp>(
-          lowerBounds[loopIdx], b.create<arith::MulIOp>(iv, steps[loopIdx]));
+      computeBlockInductionVars[loopIdx] =
+          arith::AddIOp::create(b, lowerBounds[loopIdx],
+                                arith::MulIOp::create(b, iv, steps[loopIdx]));

       // Check if we are inside first or last iteration of the loop.
-      isBlockFirstCoord[loopIdx] = b.create<arith::CmpIOp>(
-          arith::CmpIPredicate::eq, iv, blockFirstCoord[loopIdx]);
-      isBlockLastCoord[loopIdx] = b.create<arith::CmpIOp>(
-          arith::CmpIPredicate::eq, iv, blockLastCoord[loopIdx]);
+      isBlockFirstCoord[loopIdx] = arith::CmpIOp::create(
+          b, arith::CmpIPredicate::eq, iv, blockFirstCoord[loopIdx]);
+      isBlockLastCoord[loopIdx] = arith::CmpIOp::create(
+          b, arith::CmpIPredicate::eq, iv, blockLastCoord[loopIdx]);

       // Check if the previous loop is in its first or last iteration.
       if (loopIdx > 0) {
-        isBlockFirstCoord[loopIdx] = b.create<arith::AndIOp>(
-            isBlockFirstCoord[loopIdx], isBlockFirstCoord[loopIdx - 1]);
-        isBlockLastCoord[loopIdx] = b.create<arith::AndIOp>(
-            isBlockLastCoord[loopIdx], isBlockLastCoord[loopIdx - 1]);
+        isBlockFirstCoord[loopIdx] = arith::AndIOp::create(
+            b, isBlockFirstCoord[loopIdx], isBlockFirstCoord[loopIdx - 1]);
+        isBlockLastCoord[loopIdx] = arith::AndIOp::create(
+            b, isBlockLastCoord[loopIdx], isBlockLastCoord[loopIdx - 1]);
       }

       // Keep building loop nest.
@@ -390,24 +391,24 @@ static ParallelComputeFunction createParallelComputeFunction(
         if (loopIdx + 1 >= op.getNumLoops() - numBlockAlignedInnerLoops) {
           // For block aligned loops we always iterate starting from 0 up to
           // the loop trip counts.
-          b.create<scf::ForOp>(c0, tripCounts[loopIdx + 1], c1, ValueRange(),
-                               workLoopBuilder(loopIdx + 1));
+          scf::ForOp::create(b, c0, tripCounts[loopIdx + 1], c1, ValueRange(),
+                             workLoopBuilder(loopIdx + 1));

         } else {
           // Select nested loop lower/upper bounds depending on our position in
           // the multi-dimensional iteration space.
-          auto lb = b.create<arith::SelectOp>(isBlockFirstCoord[loopIdx],
-                                              blockFirstCoord[loopIdx + 1], c0);
+          auto lb = arith::SelectOp::create(b, isBlockFirstCoord[loopIdx],
+                                            blockFirstCoord[loopIdx + 1], c0);

-          auto ub = b.create<arith::SelectOp>(isBlockLastCoord[loopIdx],
-                                              blockEndCoord[loopIdx + 1],
-                                              tripCounts[loopIdx + 1]);
+          auto ub = arith::SelectOp::create(b, isBlockLastCoord[loopIdx],
+                                            blockEndCoord[loopIdx + 1],
+                                            tripCounts[loopIdx + 1]);

-          b.create<scf::ForOp>(lb, ub, c1, ValueRange(),
-                               workLoopBuilder(loopIdx + 1));
+          scf::ForOp::create(b, lb, ub, c1, ValueRange(),
+                             workLoopBuilder(loopIdx + 1));
         }

-        b.create<scf::YieldOp>(loc);
+        scf::YieldOp::create(b, loc);
         return;
       }

@@ -418,13 +419,13 @@ static ParallelComputeFunction createParallelComputeFunction(
       for (auto &bodyOp : op.getRegion().front().without_terminator())
         b.clone(bodyOp, mapping);

-      b.create<scf::YieldOp>(loc);
+      scf::YieldOp::create(b, loc);
     };
   };

-  b.create<scf::ForOp>(blockFirstCoord[0], blockEndCoord[0], c1, ValueRange(),
-                       workLoopBuilder(0));
-  b.create<func::ReturnOp>(ValueRange());
+  scf::ForOp::create(b, blockFirstCoord[0], blockEndCoord[0], c1, ValueRange(),
+                     workLoopBuilder(0));
+  func::ReturnOp::create(b, ValueRange());

   return {op.getNumLoops(), func, std::move(computeFuncType.captures)};
 }
@@ -484,8 +485,8 @@ createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
   b.setInsertionPointToEnd(block);

   Type indexTy = b.getIndexType();
-  Value c1 = b.create<arith::ConstantIndexOp>(1);
-  Value c2 = b.create<arith::ConstantIndexOp>(2);
+  Value c1 = arith::ConstantIndexOp::create(b, 1);
+  Value c2 = arith::ConstantIndexOp::create(b, 2);

   // Get the async group that will track async dispatch completion.
   Value group = block->getArgument(0);
@@ -500,7 +501,7 @@ createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
   SmallVector<Location> locations = {loc, loc};

   // Create a recursive dispatch loop.
-  scf::WhileOp whileOp = b.create<scf::WhileOp>(types, operands);
+  scf::WhileOp whileOp = scf::WhileOp::create(b, types, operands);
   Block *before = b.createBlock(&whileOp.getBefore(), {}, types, locations);
   Block *after = b.createBlock(&whileOp.getAfter(), {}, types, locations);

@@ -510,10 +511,10 @@ createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
     b.setInsertionPointToEnd(before);
     Value start = before->getArgument(0);
     Value end = before->getArgument(1);
-    Value distance = b.create<arith::SubIOp>(end, start);
+    Value distance = arith::SubIOp::create(b, end, start);
     Value dispatch =
-        b.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, distance, c1);
-    b.create<scf::ConditionOp>(dispatch, before->getArguments());
+        arith::CmpIOp::create(b, arith::CmpIPredicate::sgt, distance, c1);
+    scf::ConditionOp::create(b, dispatch, before->getArguments());
   }

   // Setup the async dispatch loop body: recursively call dispatch function
@@ -522,9 +523,9 @@ createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
     b.setInsertionPointToEnd(after);
     Value start = after->getArgument(0);
     Value end = after->getArgument(1);
-    Value distance = b.create<arith::SubIOp>(end, start);
-    Value halfDistance = b.create<arith::DivSIOp>(distance, c2);
-    Value midIndex = b.create<arith::AddIOp>(start, halfDistance);
+    Value distance = arith::SubIOp::create(b, end, start);
+    Value halfDistance = arith::DivSIOp::create(b, distance, c2);
+    Value midIndex = arith::AddIOp::create(b, start, halfDistance);

     // Call parallel compute function inside the async.execute region.
     auto executeBodyBuilder = [&](OpBuilder &executeBuilder,
@@ -535,16 +536,16 @@ createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
       operands[1] = midIndex;
       operands[2] = end;

-      executeBuilder.create<func::CallOp>(executeLoc, func.getSymName(),
-                                          func.getResultTypes(), operands);
-      executeBuilder.create<async::YieldOp>(executeLoc, ValueRange());
+      func::CallOp::create(executeBuilder, executeLoc, func.getSymName(),
+                           func.getResultTypes(), operands);
+      async::YieldOp::create(executeBuilder, executeLoc, ValueRange());
     };

     // Create async.execute operation to dispatch half of the block range.
-    auto execute = b.create<ExecuteOp>(TypeRange(), ValueRange(), ValueRange(),
-                                       executeBodyBuilder);
-    b.create<AddToGroupOp>(indexTy, execute.getToken(), group);
-    b.create<scf::YieldOp>(ValueRange({start, midIndex}));
+    auto execute = ExecuteOp::create(b, TypeRange(), ValueRange(), ValueRange(),
+                                     executeBodyBuilder);
+    AddToGroupOp::create(b, indexTy, execute.getToken(), group);
+    scf::YieldOp::create(b, ValueRange({start, midIndex}));
   }

   // After dispatching async operations to process the tail of the block range
@@ -556,10 +557,9 @@ createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
   SmallVector<Value> computeFuncOperands = {blockStart};
   computeFuncOperands.append(forwardedInputs.begin(), forwardedInputs.end());

-  b.create<func::CallOp>(computeFunc.func.getSymName(),
-                         computeFunc.func.getResultTypes(),
-                         computeFuncOperands);
-  b.create<func::ReturnOp>(ValueRange());
+  func::CallOp::create(b, computeFunc.func.getSymName(),
+                       computeFunc.func.getResultTypes(), computeFuncOperands);
+  func::ReturnOp::create(b, ValueRange());

   return func;
 }
@@ -577,8 +577,8 @@ static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
   func::FuncOp asyncDispatchFunction =
       createAsyncDispatchFunction(parallelComputeFunction, rewriter);

-  Value c0 = b.create<arith::ConstantIndexOp>(0);
-  Value c1 = b.create<arith::ConstantIndexOp>(1);
+  Value c0 = arith::ConstantIndexOp::create(b, 0);
+  Value c1 = arith::ConstantIndexOp::create(b, 1);

   // Appends operands shared by async dispatch and parallel compute functions to
   // the given operands vector.
@@ -594,7 +594,7 @@ static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
   // completely. If this will be known statically, then canonicalization will
   // erase async group operations.
   Value isSingleBlock =
-      b.create<arith::CmpIOp>(arith::CmpIPredicate::eq, blockCount, c1);
+      arith::CmpIOp::create(b, arith::CmpIPredicate::eq, blockCount, c1);

   auto syncDispatch = [&](OpBuilder &nestedBuilder, Location loc) {
     ImplicitLocOpBuilder b(loc, nestedBuilder);
@@ -603,10 +603,10 @@ static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
     SmallVector<Value> operands = {c0, blockSize};
     appendBlockComputeOperands(operands);

-    b.create<func::CallOp>(parallelComputeFunction.func.getSymName(),
-                           parallelComputeFunction.func.getResultTypes(),
-                           operands);
-    b.create<scf::YieldOp>();
+    func::CallOp::create(b, parallelComputeFunction.func.getSymName(),
+                         parallelComputeFunction.func.getResultTypes(),
+                         operands);
+    scf::YieldOp::create(b);
   };

   auto asyncDispatch = [&](OpBuilder &nestedBuilder, Location loc) {
@@ -615,24 +615,24 @@ static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
     // Create an async.group to wait on all async tokens from the concurrent
     // execution of multiple parallel compute function. First block will be
    // executed synchronously in the caller thread.
-    Value groupSize = b.create<arith::SubIOp>(blockCount, c1);
-    Value group = b.create<CreateGroupOp>(GroupType::get(ctx), groupSize);
+    Value groupSize = arith::SubIOp::create(b, blockCount, c1);
+    Value group = CreateGroupOp::create(b, GroupType::get(ctx), groupSize);

     // Launch async dispatch function for [0, blockCount) range.
     SmallVector<Value> operands = {group, c0, blockCount, blockSize};
     appendBlockComputeOperands(operands);

-    b.create<func::CallOp>(asyncDispatchFunction.getSymName(),
-                           asyncDispatchFunction.getResultTypes(), operands);
+    func::CallOp::create(b, asyncDispatchFunction.getSymName(),
+                         asyncDispatchFunction.getResultTypes(), operands);

     // Wait for the completion of all parallel compute operations.
-    b.create<AwaitAllOp>(group);
+    AwaitAllOp::create(b, group);

-    b.create<scf::YieldOp>();
+    scf::YieldOp::create(b);
   };

   // Dispatch either single block compute function, or launch async dispatch.
-  b.create<scf::IfOp>(isSingleBlock, syncDispatch, asyncDispatch);
+  scf::IfOp::create(b, isSingleBlock, syncDispatch, asyncDispatch);
 }

 // Dispatch parallel compute functions by submitting all async compute tasks
@@ -646,14 +646,14 @@ doSequentialDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
   func::FuncOp compute = parallelComputeFunction.func;

-  Value c0 = b.create<arith::ConstantIndexOp>(0);
-  Value c1 = b.create<arith::ConstantIndexOp>(1);
+  Value c0 = arith::ConstantIndexOp::create(b, 0);
+  Value c1 = arith::ConstantIndexOp::create(b, 1);

   // Create an async.group to wait on all async tokens from the concurrent
   // execution of multiple parallel compute function. First block will be
   // executed synchronously in the caller thread.
-  Value groupSize = b.create<arith::SubIOp>(blockCount, c1);
-  Value group = b.create<CreateGroupOp>(GroupType::get(ctx), groupSize);
+  Value groupSize = arith::SubIOp::create(b, blockCount, c1);
+  Value group = CreateGroupOp::create(b, GroupType::get(ctx), groupSize);

   // Call parallel compute function for all blocks.
   using LoopBodyBuilder =
@@ -680,28 +680,27 @@ doSequentialDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
     // Call parallel compute function inside the async.execute region.
     auto executeBodyBuilder = [&](OpBuilder &executeBuilder,
                                   Location executeLoc, ValueRange executeArgs) {
-      executeBuilder.create<func::CallOp>(executeLoc, compute.getSymName(),
-                                          compute.getResultTypes(),
-                                          computeFuncOperands(iv));
-      executeBuilder.create<async::YieldOp>(executeLoc, ValueRange());
+      func::CallOp::create(executeBuilder, executeLoc, compute.getSymName(),
+                           compute.getResultTypes(), computeFuncOperands(iv));
+      async::YieldOp::create(executeBuilder, executeLoc, ValueRange());
     };

     // Create async.execute operation to launch parallel computate function.
-    auto execute = b.create<ExecuteOp>(TypeRange(), ValueRange(), ValueRange(),
-                                       executeBodyBuilder);
-    b.create<AddToGroupOp>(rewriter.getIndexType(), execute.getToken(), group);
-    b.create<scf::YieldOp>();
+    auto execute = ExecuteOp::create(b, TypeRange(), ValueRange(), ValueRange(),
+                                     executeBodyBuilder);
+    AddToGroupOp::create(b, rewriter.getIndexType(), execute.getToken(), group);
+    scf::YieldOp::create(b);
   };

   // Iterate over all compute blocks and launch parallel compute operations.
-  b.create<scf::ForOp>(c1, blockCount, c1, ValueRange(), loopBuilder);
+  scf::ForOp::create(b, c1, blockCount, c1, ValueRange(), loopBuilder);

   // Call parallel compute function for the first block in the caller thread.
-  b.create<func::CallOp>(compute.getSymName(), compute.getResultTypes(),
-                         computeFuncOperands(c0));
+  func::CallOp::create(b, compute.getSymName(), compute.getResultTypes(),
+                       computeFuncOperands(c0));

   // Wait for the completion of all async compute operations.
-  b.create<AwaitAllOp>(group);
+  AwaitAllOp::create(b, group);
 }

 LogicalResult
@@ -737,17 +736,17 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
   // for the scf.parallel operation.
   Value tripCount = tripCounts[0];
   for (size_t i = 1; i < tripCounts.size(); ++i)
-    tripCount = b.create<arith::MulIOp>(tripCount, tripCounts[i]);
+    tripCount = arith::MulIOp::create(b, tripCount, tripCounts[i]);

   // Short circuit no-op parallel loops (zero iterations) that can arise from
   // the memrefs with dynamic dimension(s) equal to zero.
-  Value c0 = b.create<arith::ConstantIndexOp>(0);
+  Value c0 = arith::ConstantIndexOp::create(b, 0);
   Value isZeroIterations =
-      b.create<arith::CmpIOp>(arith::CmpIPredicate::eq, tripCount, c0);
+      arith::CmpIOp::create(b, arith::CmpIPredicate::eq, tripCount, c0);

   // Do absolutely nothing if the trip count is zero.
   auto noOp = [&](OpBuilder &nestedBuilder, Location loc) {
-    nestedBuilder.create<scf::YieldOp>(loc);
+    scf::YieldOp::create(nestedBuilder, loc);
   };

   // Compute the parallel block size and dispatch concurrent tasks computing
@@ -797,9 +796,9 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
     Value numWorkerThreadsVal;
     if (numWorkerThreads >= 0)
-      numWorkerThreadsVal = b.create<arith::ConstantIndexOp>(numWorkerThreads);
+      numWorkerThreadsVal = arith::ConstantIndexOp::create(b, numWorkerThreads);
     else
-      numWorkerThreadsVal = b.create<async::RuntimeNumWorkerThreadsOp>();
+      numWorkerThreadsVal = async::RuntimeNumWorkerThreadsOp::create(b);

     // With large number of threads the value of creating many compute blocks
     // is reduced because the problem typically becomes memory bound.
    // For this
@@ -818,38 +817,38 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
         {4, 4.0f}, {8, 2.0f}, {16, 1.0f}, {32, 0.8f}, {64, 0.6f}};
     const float initialOvershardingFactor = 8.0f;

-    Value scalingFactor = b.create<arith::ConstantFloatOp>(
-        b.getF32Type(), llvm::APFloat(initialOvershardingFactor));
+    Value scalingFactor = arith::ConstantFloatOp::create(
+        b, b.getF32Type(), llvm::APFloat(initialOvershardingFactor));
     for (const std::pair<int, float> &p : overshardingBrackets) {
-      Value bracketBegin = b.create<arith::ConstantIndexOp>(p.first);
-      Value inBracket = b.create<arith::CmpIOp>(
-          arith::CmpIPredicate::sgt, numWorkerThreadsVal, bracketBegin);
-      Value bracketScalingFactor = b.create<arith::ConstantFloatOp>(
-          b.getF32Type(), llvm::APFloat(p.second));
-      scalingFactor = b.create<arith::SelectOp>(inBracket, bracketScalingFactor,
-                                                scalingFactor);
+      Value bracketBegin = arith::ConstantIndexOp::create(b, p.first);
+      Value inBracket = arith::CmpIOp::create(
+          b, arith::CmpIPredicate::sgt, numWorkerThreadsVal, bracketBegin);
+      Value bracketScalingFactor = arith::ConstantFloatOp::create(
+          b, b.getF32Type(), llvm::APFloat(p.second));
+      scalingFactor = arith::SelectOp::create(
+          b, inBracket, bracketScalingFactor, scalingFactor);
     }
     Value numWorkersIndex =
-        b.create<arith::IndexCastOp>(b.getI32Type(), numWorkerThreadsVal);
+        arith::IndexCastOp::create(b, b.getI32Type(), numWorkerThreadsVal);
     Value numWorkersFloat =
-        b.create<arith::SIToFPOp>(b.getF32Type(), numWorkersIndex);
+        arith::SIToFPOp::create(b, b.getF32Type(), numWorkersIndex);
     Value scaledNumWorkers =
-        b.create<arith::MulFOp>(scalingFactor, numWorkersFloat);
+        arith::MulFOp::create(b, scalingFactor, numWorkersFloat);
     Value scaledNumInt =
-        b.create<arith::FPToSIOp>(b.getI32Type(), scaledNumWorkers);
+        arith::FPToSIOp::create(b, b.getI32Type(), scaledNumWorkers);
     Value scaledWorkers =
-        b.create<arith::IndexCastOp>(b.getIndexType(), scaledNumInt);
+        arith::IndexCastOp::create(b, b.getIndexType(), scaledNumInt);

-    Value maxComputeBlocks = b.create<arith::MaxSIOp>(
-        b.create<arith::ConstantIndexOp>(1), scaledWorkers);
+    Value maxComputeBlocks = arith::MaxSIOp::create(
+        b, arith::ConstantIndexOp::create(b, 1), scaledWorkers);

     // Compute parallel block size from the parallel problem size:
     //   blockSize = min(tripCount,
     //                   max(ceil_div(tripCount, maxComputeBlocks),
     //                       minTaskSize))
-    Value bs0 = b.create<arith::CeilDivSIOp>(tripCount, maxComputeBlocks);
-    Value bs1 = b.create<arith::MaxSIOp>(bs0, minTaskSize);
-    Value blockSize = b.create<arith::MinSIOp>(tripCount, bs1);
+    Value bs0 = arith::CeilDivSIOp::create(b, tripCount, maxComputeBlocks);
+    Value bs1 = arith::MaxSIOp::create(b, bs0, minTaskSize);
+    Value blockSize = arith::MinSIOp::create(b, tripCount, bs1);

     // Dispatch parallel compute function using async recursive work splitting,
     // or by submitting compute task sequentially from a caller thread.
@@ -859,7 +858,7 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
     // the parallel operation body for a subset of iteration space.

     // Compute the number of parallel compute blocks.
-    Value blockCount = b.create<arith::CeilDivSIOp>(tripCount, blockSize);
+    Value blockCount = arith::CeilDivSIOp::create(b, tripCount, blockSize);

     // Dispatch parallel compute function without hints to unroll inner loops.
     auto dispatchDefault = [&](OpBuilder &nestedBuilder, Location loc) {
@@ -868,7 +867,7 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
       ImplicitLocOpBuilder b(loc, nestedBuilder);
       doDispatch(b, rewriter, compute, op, blockSize, blockCount, tripCounts);
-      b.create<scf::YieldOp>();
+      scf::YieldOp::create(b);
     };

     // Dispatch parallel compute function with hints for unrolling inner loops.
@@ -879,34 +878,34 @@ AsyncParallelForRewrite::matchAndRewrite(scf::ParallelOp op,
       ImplicitLocOpBuilder b(loc, nestedBuilder);
       // Align the block size to be a multiple of the statically known
       // number of iterations in the inner loops.
-      Value numIters = b.create<arith::ConstantIndexOp>(
-          numIterations[op.getNumLoops() - numUnrollableLoops]);
-      Value alignedBlockSize = b.create<arith::MulIOp>(
-          b.create<arith::CeilDivSIOp>(blockSize, numIters), numIters);
+      Value numIters = arith::ConstantIndexOp::create(
+          b, numIterations[op.getNumLoops() - numUnrollableLoops]);
+      Value alignedBlockSize = arith::MulIOp::create(
+          b, arith::CeilDivSIOp::create(b, blockSize, numIters), numIters);
       doDispatch(b, rewriter, compute, op, alignedBlockSize, blockCount,
                  tripCounts);
-      b.create<scf::YieldOp>();
+      scf::YieldOp::create(b);
     };

     // Dispatch to block aligned compute function only if the computed block
     // size is larger than the number of iterations in the unrollable inner
     // loops, because otherwise it can reduce the available parallelism.
     if (numUnrollableLoops > 0) {
-      Value numIters = b.create<arith::ConstantIndexOp>(
-          numIterations[op.getNumLoops() - numUnrollableLoops]);
-      Value useBlockAlignedComputeFn = b.create<arith::CmpIOp>(
-          arith::CmpIPredicate::sge, blockSize, numIters);
-
-      b.create<scf::IfOp>(useBlockAlignedComputeFn, dispatchBlockAligned,
-                          dispatchDefault);
-      b.create<scf::YieldOp>();
+      Value numIters = arith::ConstantIndexOp::create(
+          b, numIterations[op.getNumLoops() - numUnrollableLoops]);
+      Value useBlockAlignedComputeFn = arith::CmpIOp::create(
+          b, arith::CmpIPredicate::sge, blockSize, numIters);
+
+      scf::IfOp::create(b, useBlockAlignedComputeFn, dispatchBlockAligned,
+                        dispatchDefault);
+      scf::YieldOp::create(b);
     } else {
       dispatchDefault(b, loc);
     }
   };

   // Replace the `scf.parallel` operation with the parallel compute function.
-  b.create<scf::IfOp>(isZeroIterations, noOp, dispatch);
+  scf::IfOp::create(b, isZeroIterations, noOp, dispatch);

   // Parallel operation was replaced with a block iteration loop.
   rewriter.eraseOp(op);
@@ -921,7 +920,7 @@ void AsyncParallelForPass::runOnOperation() {
   populateAsyncParallelForPatterns(
       patterns, asyncDispatch, numWorkerThreads,
       [&](ImplicitLocOpBuilder builder, scf::ParallelOp op) {
-        return builder.create<arith::ConstantIndexOp>(minTaskSize);
+        return arith::ConstantIndexOp::create(builder, minTaskSize);
       });
   if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
     signalPassFailure();
diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
index 0da9b3a..ddc64ea 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp
@@ -48,7 +48,7 @@ static LogicalResult dropRefIfNoUses(Value value, unsigned count = 1) {
   else
     b.setInsertionPointToStart(value.getParentBlock());

-  b.create<RuntimeDropRefOp>(value.getLoc(), value, b.getI64IntegerAttr(1));
+  RuntimeDropRefOp::create(b, value.getLoc(), value, b.getI64IntegerAttr(1));
   return success();
 }

@@ -309,7 +309,7 @@ LogicalResult AsyncRuntimeRefCountingPass::addDropRefAfterLastUse(Value value) {
     // Add a drop_ref immediately after the last user.
     builder.setInsertionPointAfter(lastUser);
-    builder.create<RuntimeDropRefOp>(loc, value, builder.getI64IntegerAttr(1));
+    RuntimeDropRefOp::create(builder, loc, value, builder.getI64IntegerAttr(1));
   }

   return success();
@@ -327,7 +327,7 @@ AsyncRuntimeRefCountingPass::addAddRefBeforeFunctionCall(Value value) {
     // Add a reference before the function call to pass the value at `+1`
     // reference to the function entry block.
     builder.setInsertionPoint(user);
-    builder.create<RuntimeAddRefOp>(loc, value, builder.getI64IntegerAttr(1));
+    RuntimeAddRefOp::create(builder, loc, value, builder.getI64IntegerAttr(1));
   }

   return success();
@@ -411,12 +411,12 @@ AsyncRuntimeRefCountingPass::addDropRefInDivergentLivenessSuccessor(
       refCountingBlock = &successor->getParent()->emplaceBlock();
       refCountingBlock->moveBefore(successor);
       OpBuilder builder = OpBuilder::atBlockEnd(refCountingBlock);
-      builder.create<cf::BranchOp>(value.getLoc(), successor);
+      cf::BranchOp::create(builder, value.getLoc(), successor);
     }

     OpBuilder builder = OpBuilder::atBlockBegin(refCountingBlock);
-    builder.create<RuntimeDropRefOp>(value.getLoc(), value,
-                                     builder.getI64IntegerAttr(1));
+    RuntimeDropRefOp::create(builder, value.getLoc(), value,
+                             builder.getI64IntegerAttr(1));

     // No need to update the terminator operation.
     if (successor == refCountingBlock)
@@ -507,13 +507,13 @@ AsyncRuntimePolicyBasedRefCountingPass::addRefCounting(Value value) {
       // Create `add_ref` operation before the operand owner.
       if (cnt > 0) {
         b.setInsertionPoint(operand.getOwner());
-        b.create<RuntimeAddRefOp>(loc, value, b.getI64IntegerAttr(cnt));
+        RuntimeAddRefOp::create(b, loc, value, b.getI64IntegerAttr(cnt));
       }

       // Create `drop_ref` operation after the operand owner.
       if (cnt < 0) {
         b.setInsertionPointAfter(operand.getOwner());
-        b.create<RuntimeDropRefOp>(loc, value, b.getI64IntegerAttr(-cnt));
+        RuntimeDropRefOp::create(b, loc, value, b.getI64IntegerAttr(-cnt));
       }
     }
   }
diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
index 44a3837..112d69c 100644
--- a/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
+++ b/mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
@@ -186,22 +186,22 @@ static CoroMachinery setupCoroMachinery(func::FuncOp func) {
   std::optional<Value> retToken;
   if (isStateful)
-    retToken.emplace(builder.create<RuntimeCreateOp>(TokenType::get(ctx)));
+    retToken.emplace(RuntimeCreateOp::create(builder, TokenType::get(ctx)));

   llvm::SmallVector<Value, 4> retValues;
   ArrayRef<Type> resValueTypes =
       isStateful ? func.getResultTypes().drop_front() : func.getResultTypes();
   for (auto resType : resValueTypes)
     retValues.emplace_back(
-        builder.create<RuntimeCreateOp>(resType).getResult());
+        RuntimeCreateOp::create(builder, resType).getResult());

   // ------------------------------------------------------------------------ //
   // Initialize coroutine: get coroutine id and coroutine handle.
   // ------------------------------------------------------------------------ //
-  auto coroIdOp = builder.create<CoroIdOp>(CoroIdType::get(ctx));
+  auto coroIdOp = CoroIdOp::create(builder, CoroIdType::get(ctx));
   auto coroHdlOp =
-      builder.create<CoroBeginOp>(CoroHandleType::get(ctx), coroIdOp.getId());
-  builder.create<cf::BranchOp>(originalEntryBlock);
+      CoroBeginOp::create(builder, CoroHandleType::get(ctx), coroIdOp.getId());
+  cf::BranchOp::create(builder, originalEntryBlock);

   Block *cleanupBlock = func.addBlock();
   Block *cleanupBlockForDestroy = func.addBlock();
@@ -212,10 +212,10 @@ static CoroMachinery setupCoroMachinery(func::FuncOp func) {
   // ------------------------------------------------------------------------ //
   auto buildCleanupBlock = [&](Block *cb) {
     builder.setInsertionPointToStart(cb);
-    builder.create<CoroFreeOp>(coroIdOp.getId(), coroHdlOp.getHandle());
+    CoroFreeOp::create(builder, coroIdOp.getId(), coroHdlOp.getHandle());

     // Branch into the suspend block.
-    builder.create<cf::BranchOp>(suspendBlock);
+    cf::BranchOp::create(builder, suspendBlock);
   };
   buildCleanupBlock(cleanupBlock);
   buildCleanupBlock(cleanupBlockForDestroy);
@@ -227,7 +227,7 @@ static CoroMachinery setupCoroMachinery(func::FuncOp func) {
   builder.setInsertionPointToStart(suspendBlock);

   // Mark the end of a coroutine: async.coro.end
-  builder.create<CoroEndOp>(coroHdlOp.getHandle());
+  CoroEndOp::create(builder, coroHdlOp.getHandle());

   // Return created optional `async.token` and `async.values` from the suspend
   // block. This will be the return value of a coroutine ramp function.
@@ -235,7 +235,7 @@ static CoroMachinery setupCoroMachinery(func::FuncOp func) {
   if (retToken)
     ret.push_back(*retToken);
   llvm::append_range(ret, retValues);
-  builder.create<func::ReturnOp>(ret);
+  func::ReturnOp::create(builder, ret);

   // `async.await` op lowering will create resume blocks for async
   // continuations, and will conditionally branch to cleanup or suspend blocks.
@@ -272,13 +272,13 @@ static Block *setupSetErrorBlock(CoroMachinery &coro) {
   // Coroutine set_error block: set error on token and all returned values.
   if (coro.asyncToken)
-    builder.create<RuntimeSetErrorOp>(*coro.asyncToken);
+    RuntimeSetErrorOp::create(builder, *coro.asyncToken);
   for (Value retValue : coro.returnValues)
-    builder.create<RuntimeSetErrorOp>(retValue);
+    RuntimeSetErrorOp::create(builder, retValue);
   // Branch into the cleanup block.
-  builder.create<cf::BranchOp>(coro.cleanup);
+  cf::BranchOp::create(builder, coro.cleanup);
   return *coro.setError;
 }
@@ -333,13 +333,13 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
   // Await on all dependencies before starting to execute the body region.
   for (size_t i = 0; i < numDependencies; ++i)
-    builder.create<AwaitOp>(func.getArgument(i));
+    AwaitOp::create(builder, func.getArgument(i));
   // Await on all async value operands and unwrap the payload.
   SmallVector<Value, 4> unwrappedOperands(numOperands);
   for (size_t i = 0; i < numOperands; ++i) {
     Value operand = func.getArgument(numDependencies + i);
-    unwrappedOperands[i] = builder.create<AwaitOp>(loc, operand).getResult();
+    unwrappedOperands[i] = AwaitOp::create(builder, loc, operand).getResult();
   }
   // Map from function inputs defined above the execute op to the function
@@ -366,15 +366,15 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
     // Save the coroutine state: async.coro.save
     auto coroSaveOp =
-        builder.create<CoroSaveOp>(CoroStateType::get(ctx), coro.coroHandle);
+        CoroSaveOp::create(builder, CoroStateType::get(ctx), coro.coroHandle);
     // Pass coroutine to the runtime to be resumed on a runtime managed
     // thread.
-    builder.create<RuntimeResumeOp>(coro.coroHandle);
+    RuntimeResumeOp::create(builder, coro.coroHandle);
     // Add async.coro.suspend as a suspended block terminator.
-    builder.create<CoroSuspendOp>(coroSaveOp.getState(), coro.suspend,
-                                  branch.getDest(), coro.cleanupForDestroy);
+    CoroSuspendOp::create(builder, coroSaveOp.getState(), coro.suspend,
+                          branch.getDest(), coro.cleanupForDestroy);
     branch.erase();
   }
@@ -382,8 +382,9 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
   // Replace the original `async.execute` with a call to outlined function.
   {
     ImplicitLocOpBuilder callBuilder(loc, execute);
-    auto callOutlinedFunc = callBuilder.create<func::CallOp>(
-        func.getName(), execute.getResultTypes(), functionInputs.getArrayRef());
+    auto callOutlinedFunc = func::CallOp::create(callBuilder, func.getName(),
+                                                 execute.getResultTypes(),
+                                                 functionInputs.getArrayRef());
     execute.replaceAllUsesWith(callOutlinedFunc.getResults());
     execute.erase();
   }
@@ -451,7 +452,7 @@ public:
     Location loc = op->getLoc();
     auto newFuncOp =
-        rewriter.create<func::FuncOp>(loc, op.getName(), op.getFunctionType());
+        func::FuncOp::create(rewriter, loc, op.getName(), op.getFunctionType());
     SymbolTable::setSymbolVisibility(newFuncOp,
                                      SymbolTable::getSymbolVisibility(op));
@@ -521,16 +522,16 @@ public:
     for (auto tuple : llvm::zip(adaptor.getOperands(), coro.returnValues)) {
       Value returnValue = std::get<0>(tuple);
       Value asyncValue = std::get<1>(tuple);
-      rewriter.create<RuntimeStoreOp>(loc, returnValue, asyncValue);
-      rewriter.create<RuntimeSetAvailableOp>(loc, asyncValue);
+      RuntimeStoreOp::create(rewriter, loc, returnValue, asyncValue);
+      RuntimeSetAvailableOp::create(rewriter, loc, asyncValue);
     }
     if (coro.asyncToken)
       // Switch the coroutine completion token to available state.
-      rewriter.create<RuntimeSetAvailableOp>(loc, *coro.asyncToken);
+      RuntimeSetAvailableOp::create(rewriter, loc, *coro.asyncToken);
     rewriter.eraseOp(op);
-    rewriter.create<cf::BranchOp>(loc, coro.cleanup);
+    cf::BranchOp::create(rewriter, loc, coro.cleanup);
     return success();
   }
@@ -581,16 +582,17 @@ public:
     // the async object (token, value or group) to become available.
     if (!isInCoroutine) {
       ImplicitLocOpBuilder builder(loc, rewriter);
-      builder.create<RuntimeAwaitOp>(loc, operand);
+      RuntimeAwaitOp::create(builder, loc, operand);
       // Assert that the awaited operands is not in the error state.
-      Value isError = builder.create<RuntimeIsErrorOp>(i1, operand);
-      Value notError = builder.create<arith::XOrIOp>(
-          isError, builder.create<arith::ConstantOp>(
-                       loc, i1, builder.getIntegerAttr(i1, 1)));
-
-      builder.create<cf::AssertOp>(notError,
-                                   "Awaited async operand is in error state");
+      Value isError = RuntimeIsErrorOp::create(builder, i1, operand);
+      Value notError = arith::XOrIOp::create(
+          builder, isError,
+          arith::ConstantOp::create(builder, loc, i1,
+                                    builder.getIntegerAttr(i1, 1)));
+
+      cf::AssertOp::create(builder, notError,
+                           "Awaited async operand is in error state");
     }
     // Inside the coroutine we convert await operation into coroutine suspension
@@ -605,28 +607,28 @@ public:
       // Save the coroutine state and resume on a runtime managed thread when
      // the operand becomes available.
       auto coroSaveOp =
-          builder.create<CoroSaveOp>(CoroStateType::get(ctx), coro.coroHandle);
-      builder.create<RuntimeAwaitAndResumeOp>(operand, coro.coroHandle);
+          CoroSaveOp::create(builder, CoroStateType::get(ctx), coro.coroHandle);
+      RuntimeAwaitAndResumeOp::create(builder, operand, coro.coroHandle);
       // Split the entry block before the await operation.
       Block *resume = rewriter.splitBlock(suspended, Block::iterator(op));
       // Add async.coro.suspend as a suspended block terminator.
       builder.setInsertionPointToEnd(suspended);
-      builder.create<CoroSuspendOp>(coroSaveOp.getState(), coro.suspend, resume,
-                                    coro.cleanupForDestroy);
+      CoroSuspendOp::create(builder, coroSaveOp.getState(), coro.suspend,
+                            resume, coro.cleanupForDestroy);
       // Split the resume block into error checking and continuation.
       Block *continuation = rewriter.splitBlock(resume, Block::iterator(op));
       // Check if the awaited value is in the error state.
       builder.setInsertionPointToStart(resume);
-      auto isError = builder.create<RuntimeIsErrorOp>(loc, i1, operand);
-      builder.create<cf::CondBranchOp>(isError,
-                                       /*trueDest=*/setupSetErrorBlock(coro),
-                                       /*trueArgs=*/ArrayRef<Value>(),
-                                       /*falseDest=*/continuation,
-                                       /*falseArgs=*/ArrayRef<Value>());
+      auto isError = RuntimeIsErrorOp::create(builder, loc, i1, operand);
+      cf::CondBranchOp::create(builder, isError,
+                               /*trueDest=*/setupSetErrorBlock(coro),
+                               /*trueArgs=*/ArrayRef<Value>(),
+                               /*falseDest=*/continuation,
+                               /*falseArgs=*/ArrayRef<Value>());
       // Make sure that replacement value will be constructed in the
       // continuation block.
@@ -672,7 +674,7 @@ public:
                   ConversionPatternRewriter &rewriter) const override {
     // Load from the async value storage.
     auto valueType = cast<ValueType>(operand.getType()).getValueType();
-    return rewriter.create<RuntimeLoadOp>(op->getLoc(), valueType, operand);
+    return RuntimeLoadOp::create(rewriter, op->getLoc(), valueType, operand);
   }
 };
@@ -713,15 +715,15 @@ public:
     for (auto tuple : llvm::zip(adaptor.getOperands(), coro.returnValues)) {
       Value yieldValue = std::get<0>(tuple);
       Value asyncValue = std::get<1>(tuple);
-      rewriter.create<RuntimeStoreOp>(loc, yieldValue, asyncValue);
-      rewriter.create<RuntimeSetAvailableOp>(loc, asyncValue);
+      RuntimeStoreOp::create(rewriter, loc, yieldValue, asyncValue);
+      RuntimeSetAvailableOp::create(rewriter, loc, asyncValue);
     }
     if (coro.asyncToken)
       // Switch the coroutine completion token to available state.
-      rewriter.create<RuntimeSetAvailableOp>(loc, *coro.asyncToken);
+      RuntimeSetAvailableOp::create(rewriter, loc, *coro.asyncToken);
-    rewriter.create<cf::BranchOp>(loc, coro.cleanup);
+    cf::BranchOp::create(rewriter, loc, coro.cleanup);
     rewriter.eraseOp(op);
     return success();
@@ -755,11 +757,11 @@ public:
     Block *cont = rewriter.splitBlock(op->getBlock(), Block::iterator(op));
     rewriter.setInsertionPointToEnd(cont->getPrevNode());
-    rewriter.create<cf::CondBranchOp>(loc, adaptor.getArg(),
-                                      /*trueDest=*/cont,
-                                      /*trueArgs=*/ArrayRef<Value>(),
-                                      /*falseDest=*/setupSetErrorBlock(coro),
-                                      /*falseArgs=*/ArrayRef<Value>());
+    cf::CondBranchOp::create(rewriter, loc, adaptor.getArg(),
+                             /*trueDest=*/cont,
+                             /*trueArgs=*/ArrayRef<Value>(),
+                             /*falseDest=*/setupSetErrorBlock(coro),
+                             /*falseArgs=*/ArrayRef<Value>());
     rewriter.eraseOp(op);
     return success();
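Several hunks in this file create ops through an `ImplicitLocOpBuilder` (e.g. `RuntimeCreateOp::create(builder, TokenType::get(ctx))`) with no `Location` spelled out, because that builder carries one. A minimal sketch of the two builder flavors under the new API (the helper functions are illustrative, not from the patch):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"

using namespace mlir;

static Value emitZero(OpBuilder &b, Location loc) {
  // Plain OpBuilder: the location is passed explicitly after the builder.
  return arith::ConstantIndexOp::create(b, loc, 0);
}

static Value emitZeroImplicit(ImplicitLocOpBuilder &b) {
  // ImplicitLocOpBuilder: the create overload picks up b.getLoc() itself.
  return arith::ConstantIndexOp::create(b, 0);
}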
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferDeallocationOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferDeallocationOpInterface.cpp
index 2bf326a..4dfba74 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferDeallocationOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferDeallocationOpInterface.cpp
@@ -35,7 +35,7 @@ using namespace bufferization;
 //===----------------------------------------------------------------------===//
 static Value buildBoolValue(OpBuilder &builder, Location loc, bool value) {
-  return builder.create<arith::ConstantOp>(loc, builder.getBoolAttr(value));
+  return arith::ConstantOp::create(builder, loc, builder.getBoolAttr(value));
 }
 static bool isMemref(Value v) { return isa<BaseMemRefType>(v.getType()); }
@@ -150,7 +150,7 @@ DeallocationState::getMemrefWithUniqueOwnership(OpBuilder &builder,
   // ownerships more intelligently to not end up with an 'Unknown' ownership in
   // the first place.
   auto cloneOp =
-      builder.create<bufferization::CloneOp>(memref.getLoc(), memref);
+      bufferization::CloneOp::create(builder, memref.getLoc(), memref);
   Value condition = buildBoolValue(builder, memref.getLoc(), true);
   Value newMemref = cloneOp.getResult();
   updateOwnership(newMemref, condition);
@@ -196,8 +196,8 @@ LogicalResult DeallocationState::getMemrefsAndConditionsToDeallocate(
     // Simply cast unranked MemRefs to ranked memrefs with 0 dimensions such
     // that we can call extract_strided_metadata on it.
     if (auto unrankedMemRefTy = dyn_cast<UnrankedMemRefType>(memref.getType()))
-      memref = builder.create<memref::ReinterpretCastOp>(
-          loc, memref,
+      memref = memref::ReinterpretCastOp::create(
+          builder, loc, memref,
           /*offset=*/builder.getIndexAttr(0),
           /*sizes=*/ArrayRef<OpFoldResult>{},
           /*strides=*/ArrayRef<OpFoldResult>{});
@@ -207,7 +207,7 @@ LogicalResult DeallocationState::getMemrefsAndConditionsToDeallocate(
     // alloc operation has to be passed to the dealloc operation. Passing
     // subviews, etc. to a dealloc operation is not allowed.
     memrefs.push_back(
-        builder.create<memref::ExtractStridedMetadataOp>(loc, memref)
+        memref::ExtractStridedMetadataOp::create(builder, loc, memref)
             .getResult(0));
     conditions.push_back(ownership.getIndicator());
   }
@@ -296,8 +296,8 @@ FailureOr<Operation *> deallocation_impl::insertDeallocOpForReturnLike(
   if (memrefs.empty() && toRetain.empty())
     return op;
-  auto deallocOp = builder.create<bufferization::DeallocOp>(
-      op->getLoc(), memrefs, conditions, toRetain);
+  auto deallocOp = bufferization::DeallocOp::create(
+      builder, op->getLoc(), memrefs, conditions, toRetain);
   // We want to replace the current ownership of the retained values with the
   // result values of the dealloc operation as they are always unique.
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 8f17a82f..825f63e 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -170,8 +170,8 @@ FailureOr<Value> bufferization::allocateTensorForShapedValue(
   if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
     tensor = shapedValue;
   } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
-    tensor = b.create<ToTensorOp>(
-        loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
+    tensor = ToTensorOp::create(
+        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
         shapedValue);
   } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
              llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
@@ -209,8 +209,8 @@ FailureOr<Value> bufferization::allocateTensorForShapedValue(
   }
   // Create AllocTensorOp.
-  auto allocTensorOp = b.create<AllocTensorOp>(loc, tensorType, dynamicSizes,
-                                               copy ? tensor : Value());
+  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
+                                             copy ? tensor : Value());
   // Add 'memory_space' attribute. Not needed if 'copy' operand is specified.
   if (copy)
@@ -753,8 +753,8 @@ void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
       // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
       // loose all of its users and eventually DCE away.
       rewriter.setInsertionPointAfter(op);
-      replacement = rewriter.create<bufferization::ToTensorOp>(
-          replacement.getLoc(), opResult.getType(), replacement);
+      replacement = bufferization::ToTensorOp::create(
+          rewriter, replacement.getLoc(), opResult.getType(), replacement);
     }
     replacements.push_back(replacement);
   }
@@ -779,7 +779,7 @@ FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
         .create<memref::AllocOp>(loc, type, dynShape,
                                  b.getI64IntegerAttr(bufferAlignment))
         .getResult();
-  return b.create<memref::AllocOp>(loc, type, dynShape).getResult();
+  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
 }
 /// Create a memory copy between two memref buffers.
@@ -788,7 +788,7 @@ LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
   if (memCpyFn)
     return (*memCpyFn)(b, loc, from, to);
-  b.create<memref::CopyOp>(loc, from, to);
+  memref::CopyOp::create(b, loc, from, to);
   return success();
 }
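The hunks above keep the existing insertion-point choreography (`setInsertionPointAfter`, insertion guards) and change only the create spelling. A minimal sketch of that common shape with the new API (the helper is illustrative, not part of the patch):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Emit a boolean constant right after `op`, restoring the insertion point
// afterwards; this mirrors the guard/set/create sequence used in the patch.
static Value emitTrueAfter(OpBuilder &b, Operation *op) {
  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  return arith::ConstantOp::create(b, op->getLoc(), b.getBoolAttr(true));
}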
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 875a065..dbc7d0d 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -58,7 +58,7 @@ FailureOr<Value> mlir::bufferization::castOrReallocMemRefValue(
   // a fix extra conditions in `isGuaranteedCastCompatible`.
   if (memref::CastOp::areCastCompatible(srcType, destType) &&
       isGuaranteedCastCompatible(srcType, destType)) {
-    Value casted = b.create<memref::CastOp>(value.getLoc(), destType, value);
+    Value casted = memref::CastOp::create(b, value.getLoc(), destType, value);
     return casted;
   }
@@ -67,7 +67,7 @@ FailureOr<Value> mlir::bufferization::castOrReallocMemRefValue(
   for (int i = 0; i < destType.getRank(); ++i) {
     if (destType.getShape()[i] != ShapedType::kDynamic)
       continue;
-    Value size = b.create<memref::DimOp>(loc, value, i);
+    Value size = memref::DimOp::create(b, loc, value, i);
     dynamicOperands.push_back(size);
   }
@@ -134,10 +134,10 @@ void mlir::bufferization::populateDynamicDimSizes(
   for (int64_t i = 0; i < shapedType.getRank(); ++i) {
     if (shapedType.isDynamicDim(i)) {
       if (llvm::isa<MemRefType>(shapedType)) {
-        dynamicDims.push_back(b.create<memref::DimOp>(loc, shapedValue, i));
+        dynamicDims.push_back(memref::DimOp::create(b, loc, shapedValue, i));
       } else {
         assert(llvm::isa<RankedTensorType>(shapedType) && "expected tensor");
-        dynamicDims.push_back(b.create<tensor::DimOp>(loc, shapedValue, i));
+        dynamicDims.push_back(tensor::DimOp::create(b, loc, shapedValue, i));
       }
     }
   }
@@ -321,8 +321,8 @@ struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
         newShape, op.getType().getElementType(), op.getType().getEncoding());
     if (newType == op.getType())
       return failure();
-    auto newOp = rewriter.create<AllocTensorOp>(
-        op.getLoc(), newType, newDynamicSizes, /*copy=*/Value());
+    auto newOp = AllocTensorOp::create(rewriter, op.getLoc(), newType,
+                                       newDynamicSizes, /*copy=*/Value());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -427,7 +427,7 @@ void AllocTensorOp::print(OpAsmPrinter &p) {
 Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
   assert(isDynamicDim(idx) && "expected dynamic dim");
   if (getCopy())
-    return b.create<tensor::DimOp>(getLoc(), getCopy(), idx);
+    return tensor::DimOp::create(b, getLoc(), getCopy(), idx);
   return getOperand(getIndexOfDynamicSize(idx));
 }
@@ -513,8 +513,8 @@ struct SimplifyClones : public OpRewritePattern<CloneOp> {
     }
     if (source.getType() != cloneOp.getType())
-      source = rewriter.create<memref::CastOp>(cloneOp.getLoc(),
-                                               cloneOp.getType(), source);
+      source = memref::CastOp::create(rewriter, cloneOp.getLoc(),
+                                      cloneOp.getType(), source);
     rewriter.replaceOp(cloneOp, source);
     rewriter.eraseOp(redundantDealloc);
     return success();
@@ -538,7 +538,7 @@ LogicalResult DeallocTensorOp::bufferize(RewriterBase &rewriter,
   FailureOr<Value> buffer = getBuffer(rewriter, getTensor(), options, state);
   if (failed(buffer))
     return failure();
-  rewriter.create<memref::DeallocOp>(getLoc(), *buffer);
+  memref::DeallocOp::create(rewriter, getLoc(), *buffer);
   rewriter.eraseOp(getOperation());
   return success();
 }
@@ -643,8 +643,9 @@ Value MaterializeInDestinationOp::buildSubsetExtraction(OpBuilder &builder,
   assert(getRestrict() &&
          "expected that ops with memrefs dest have 'restrict'");
   setRestrict(false);
-  return builder.create<ToTensorOp>(
-      loc, memref::getTensorTypeFromMemRefType(getDest().getType()), getDest(),
+  return ToTensorOp::create(
+      builder, loc, memref::getTensorTypeFromMemRefType(getDest().getType()),
+      getDest(),
       /*restrict=*/true, getWritable());
 }
@@ -806,8 +807,8 @@ struct ToBufferOfCast : public OpRewritePattern<ToBufferOp> {
       return failure();
     auto memrefType = MemRefType::get(srcTensorType.getShape(),
                                       srcTensorType.getElementType());
-    Value memref = rewriter.create<ToBufferOp>(toBuffer.getLoc(), memrefType,
-                                               tensorCastOperand.getOperand());
+    Value memref = ToBufferOp::create(rewriter, toBuffer.getLoc(), memrefType,
+                                      tensorCastOperand.getOperand());
     rewriter.replaceOpWithNewOp<memref::CastOp>(toBuffer, toBuffer.getType(),
                                                 memref);
     return success();
@@ -880,12 +881,12 @@ LogicalResult ToBufferOp::bufferize(RewriterBase &rewriter,
 std::optional<Operation *> CloneOp::buildDealloc(OpBuilder &builder,
                                                  Value alloc) {
-  return builder.create<memref::DeallocOp>(alloc.getLoc(), alloc)
+  return memref::DeallocOp::create(builder, alloc.getLoc(), alloc)
       .getOperation();
 }
 std::optional<Value> CloneOp::buildClone(OpBuilder &builder, Value alloc) {
-  return builder.create<CloneOp>(alloc.getLoc(), alloc).getResult();
+  return CloneOp::create(builder, alloc.getLoc(), alloc).getResult();
 }
 //===----------------------------------------------------------------------===//
@@ -959,7 +960,7 @@ struct DeallocRemoveDuplicateDeallocMemrefs
         Value &newCond = newConditions[memrefToCondition[memref]];
         if (newCond != cond)
           newCond =
-              rewriter.create<arith::OrIOp>(deallocOp.getLoc(), newCond, cond);
+              arith::OrIOp::create(rewriter, deallocOp.getLoc(), newCond, cond);
       } else {
         memrefToCondition.insert({memref, newConditions.size()});
         newMemrefs.push_back(memref);
@@ -1014,8 +1015,8 @@ struct DeallocRemoveDuplicateRetainedMemrefs
     // We need to create a new op because the number of results is always the
     // same as the number of condition operands.
     auto newDeallocOp =
-        rewriter.create<DeallocOp>(deallocOp.getLoc(), deallocOp.getMemrefs(),
-                                   deallocOp.getConditions(), newRetained);
+        DeallocOp::create(rewriter, deallocOp.getLoc(), deallocOp.getMemrefs(),
+                          deallocOp.getConditions(), newRetained);
     SmallVector<Value> replacements(
         llvm::map_range(resultReplacementIdx, [&](unsigned idx) {
           return newDeallocOp.getUpdatedConditions()[idx];
@@ -1036,8 +1037,8 @@ struct EraseEmptyDealloc : public OpRewritePattern<DeallocOp> {
   LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                 PatternRewriter &rewriter) const override {
     if (deallocOp.getMemrefs().empty()) {
-      Value constFalse = rewriter.create<arith::ConstantOp>(
-          deallocOp.getLoc(), rewriter.getBoolAttr(false));
+      Value constFalse = arith::ConstantOp::create(rewriter, deallocOp.getLoc(),
+                                                   rewriter.getBoolAttr(false));
       rewriter.replaceOp(
           deallocOp, SmallVector<Value>(deallocOp.getUpdatedConditions().size(),
                                         constFalse));
diff --git a/mlir/lib/Dialect/Bufferization/TransformOps/BufferizationTransformOps.cpp b/mlir/lib/Dialect/Bufferization/TransformOps/BufferizationTransformOps.cpp
index db1eb20..7f495b0 100644
--- a/mlir/lib/Dialect/Bufferization/TransformOps/BufferizationTransformOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/TransformOps/BufferizationTransformOps.cpp
@@ -70,12 +70,12 @@ transform::OneShotBufferizeOp::apply(transform::TransformRewriter &rewriter,
         *getFunctionBoundaryTypeConversion());
   if (getMemcpyOp() == "memref.copy") {
     options.memCpyFn = [](OpBuilder &b, Location loc, Value from, Value to) {
-      b.create<memref::CopyOp>(loc, from, to);
+      memref::CopyOp::create(b, loc, from, to);
       return success();
     };
   } else if (getMemcpyOp() == "linalg.copy") {
     options.memCpyFn = [](OpBuilder &b, Location loc, Value from, Value to) {
-      b.create<linalg::CopyOp>(loc, from, to);
+      linalg::CopyOp::create(b, loc, from, to);
       return success();
     };
   } else {
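The `memCpyFn` hook shown above is a plain `std::function` over `OpBuilder`, so out-of-tree users can plug in their own copy lowering with the new create spelling. A minimal sketch, assuming the one-shot bufferization options type and its `memCpyFn` member (the lambda body mirrors the hunk):

#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"

using namespace mlir;

static bufferization::OneShotBufferizationOptions makeOptions() {
  bufferization::OneShotBufferizationOptions options;
  // Lower all buffer copies to memref.copy via the static create API.
  options.memCpyFn = [](OpBuilder &b, Location loc, Value from, Value to) {
    memref::CopyOp::create(b, loc, from, to);
    return success();
  };
  return options;
}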
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
index c5fab80..8916526 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocationSimplification.cpp
@@ -167,8 +167,8 @@ struct RemoveDeallocMemrefsContainedInRetained
       std::optional<bool> analysisResult =
           analysis.isSameAllocation(retained, memref);
       if (analysisResult == true) {
-        auto disjunction = rewriter.create<arith::OrIOp>(
-            deallocOp.getLoc(), updatedCondition, cond);
+        auto disjunction = arith::OrIOp::create(rewriter, deallocOp.getLoc(),
+                                                updatedCondition, cond);
         rewriter.replaceAllUsesExcept(updatedCondition, disjunction.getResult(),
                                       disjunction);
       }
@@ -247,16 +247,16 @@ struct RemoveRetainedMemrefsGuaranteedToNotAlias
         continue;
       }
-      replacements.push_back(rewriter.create<arith::ConstantOp>(
-          deallocOp.getLoc(), rewriter.getBoolAttr(false)));
+      replacements.push_back(arith::ConstantOp::create(
+          rewriter, deallocOp.getLoc(), rewriter.getBoolAttr(false)));
     }
     if (newRetainedMemrefs.size() == deallocOp.getRetained().size())
       return failure();
-    auto newDeallocOp = rewriter.create<DeallocOp>(
-        deallocOp.getLoc(), deallocOp.getMemrefs(), deallocOp.getConditions(),
-        newRetainedMemrefs);
+    auto newDeallocOp =
+        DeallocOp::create(rewriter, deallocOp.getLoc(), deallocOp.getMemrefs(),
+                          deallocOp.getConditions(), newRetainedMemrefs);
     int i = 0;
     for (auto &repl : replacements) {
       if (!repl)
@@ -326,8 +326,8 @@ struct SplitDeallocWhenNotAliasingAnyOther
       }
       // Create new bufferization.dealloc op for `memref`.
-      auto newDeallocOp = rewriter.create<DeallocOp>(loc, memref, cond,
-                                                     deallocOp.getRetained());
+      auto newDeallocOp = DeallocOp::create(rewriter, loc, memref, cond,
+                                            deallocOp.getRetained());
       updatedConditions.push_back(
           llvm::to_vector(ValueRange(newDeallocOp.getUpdatedConditions())));
     }
@@ -337,8 +337,9 @@ struct SplitDeallocWhenNotAliasingAnyOther
       return failure();
     // Create bufferization.dealloc op for all remaining memrefs.
-    auto newDeallocOp = rewriter.create<DeallocOp>(
-        loc, remainingMemrefs, remainingConditions, deallocOp.getRetained());
+    auto newDeallocOp =
+        DeallocOp::create(rewriter, loc, remainingMemrefs, remainingConditions,
+                          deallocOp.getRetained());
     // Bit-or all conditions.
     SmallVector<Value> replacements =
@@ -347,8 +348,8 @@ struct SplitDeallocWhenNotAliasingAnyOther
       assert(replacements.size() == additionalConditions.size() &&
              "expected same number of updated conditions");
       for (int64_t i = 0, e = replacements.size(); i < e; ++i) {
-        replacements[i] = rewriter.create<arith::OrIOp>(
-            loc, replacements[i], additionalConditions[i]);
+        replacements[i] = arith::OrIOp::create(rewriter, loc, replacements[i],
+                                               additionalConditions[i]);
       }
     }
     rewriter.replaceOp(deallocOp, replacements);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
index 6924e88..e30e094 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferResultsToOutParams.cpp
@@ -132,7 +132,7 @@ updateReturnOps(func::FuncOp func, ArrayRef<BlockArgument> appendedEntryArgs,
         return WalkResult::interrupt();
       }
     }
-    builder.create<func::ReturnOp>(op.getLoc(), keepAsReturnOperands);
+    func::ReturnOp::create(builder, op.getLoc(), keepAsReturnOperands);
     op.erase();
     return WalkResult::advance();
   });
@@ -190,7 +190,7 @@ updateCalls(ModuleOp module,
       assert(hasFullyDynamicLayoutMap(memrefType) &&
              "layout map not supported");
       outParam =
-          builder.create<memref::CastOp>(op.getLoc(), memrefType, outParam);
+          memref::CastOp::create(builder, op.getLoc(), memrefType, outParam);
     }
     memref.replaceAllUsesWith(outParam);
     outParams.push_back(outParam);
@@ -200,8 +200,8 @@ updateCalls(ModuleOp module,
     newOperands.append(outParams.begin(), outParams.end());
     auto newResultTypes = llvm::to_vector<6>(llvm::map_range(
         replaceWithNewCallResults, [](Value v) { return v.getType(); }));
-    auto newCall = builder.create<func::CallOp>(op.getLoc(), op.getCalleeAttr(),
-                                                newResultTypes, newOperands);
+    auto newCall = func::CallOp::create(
+        builder, op.getLoc(), op.getCalleeAttr(), newResultTypes, newOperands);
     for (auto t : llvm::zip(replaceWithNewCallResults, newCall.getResults()))
       std::get<0>(t).replaceAllUsesWith(std::get<1>(t));
     op.erase();
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
index a66be7d..c0e0809 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
@@ -141,8 +141,9 @@ bufferization::getGlobalFor(arith::ConstantOp constantOp,
       cast<MemRefType>(getMemRefTypeWithStaticIdentityLayout(type));
   if (memorySpace)
     memrefType = MemRefType::Builder(memrefType).setMemorySpace(memorySpace);
-  auto global = globalBuilder.create<memref::GlobalOp>(
-      constantOp.getLoc(), (Twine("__constant_") + os.str()).str(),
+  auto global = memref::GlobalOp::create(
+      globalBuilder, constantOp.getLoc(),
+      (Twine("__constant_") + os.str()).str(),
       /*sym_visibility=*/globalBuilder.getStringAttr("private"),
       /*type=*/memrefType,
       /*initial_value=*/cast<ElementsAttr>(constantOp.getValue()),
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index 246555d..91f6f25 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -434,8 +434,8 @@ bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter,
   // Replace all uses of the original tensor bbArg.
   rewriter.setInsertionPointToStart(block);
   if (!bbArgUses.empty()) {
-    Value toTensorOp = rewriter.create<bufferization::ToTensorOp>(
-        bbArg.getLoc(), tensorType, bbArg);
+    Value toTensorOp = bufferization::ToTensorOp::create(
+        rewriter, bbArg.getLoc(), tensorType, bbArg);
     for (OpOperand *use : bbArgUses)
       use->set(toTensorOp);
   }
@@ -466,13 +466,13 @@ bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter,
     if (failed(operandBufferType))
       return failure();
     rewriter.setInsertionPointAfterValue(operand);
-    Value bufferizedOperand = rewriter.create<bufferization::ToBufferOp>(
-        operand.getLoc(), *operandBufferType, operand);
+    Value bufferizedOperand = bufferization::ToBufferOp::create(
+        rewriter, operand.getLoc(), *operandBufferType, operand);
     // A cast is needed if the operand and the block argument have different
     // bufferized types.
     if (type != *operandBufferType)
-      bufferizedOperand = rewriter.create<memref::CastOp>(
-          operand.getLoc(), type, bufferizedOperand);
+      bufferizedOperand = memref::CastOp::create(rewriter, operand.getLoc(),
+                                                 type, bufferizedOperand);
     newOperands.push_back(bufferizedOperand);
   }
   operands.getMutableForwardedOperands().assign(newOperands);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
index c10d290..a50ddbe 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
@@ -118,8 +118,8 @@ mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
     // Update function calls.
     for (func::CallOp callOp : callerMap[funcOp]) {
      rewriter.setInsertionPoint(callOp);
-      auto newCallOp = rewriter.create<func::CallOp>(callOp.getLoc(), funcOp,
-                                                     callOp.getOperands());
+      auto newCallOp = func::CallOp::create(rewriter, callOp.getLoc(), funcOp,
+                                            callOp.getOperands());
       SmallVector<Value> newResults;
       int64_t nextResult = 0;
       for (int64_t i = 0; i < callOp.getNumResults(); ++i) {
@@ -134,8 +134,8 @@ mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
         Type expectedType = callOp.getResult(i).getType();
         if (replacement.getType() != expectedType) {
           // A cast must be inserted at the call site.
-          replacement = rewriter.create<memref::CastOp>(
-              callOp.getLoc(), expectedType, replacement);
+          replacement = memref::CastOp::create(rewriter, callOp.getLoc(),
+                                               expectedType, replacement);
         }
         newResults.push_back(replacement);
       }
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp b/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp
index b7db2e8..1784964 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp
@@ -168,8 +168,8 @@ LogicalResult mlir::bufferization::eliminateEmptyTensors(
           cast<ShapedType>(v.getType()).getElementType())
         continue;
       rewriter.setInsertionPointAfterValue(replacement);
-      replacement = rewriter.create<tensor::CastOp>(v.getLoc(), v.getType(),
-                                                    replacement);
+      replacement = tensor::CastOp::create(rewriter, v.getLoc(), v.getType(),
+                                           replacement);
     }
     // Replace the specific use of the tensor::EmptyOp.
     rewriter.modifyOpInPlace(user, [&]() {
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
index 2a98203..f69efd1 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -319,8 +319,9 @@ struct CallOpInterface
     }
     // 3. Create the new CallOp.
-    Operation *newCallOp = rewriter.create<func::CallOp>(
-        callOp.getLoc(), funcOp.getSymName(), resultTypes, newOperands);
+    Operation *newCallOp =
+        func::CallOp::create(rewriter, callOp.getLoc(), funcOp.getSymName(),
+                             resultTypes, newOperands);
     newCallOp->setAttrs(callOp->getAttrs());
     // 4. Replace the old op with the new op.
@@ -483,8 +484,8 @@ struct FuncOpInterface
       // Note: If `inferFunctionResultLayout = true`, casts are later folded
       // away.
-      Value toBufferOp = rewriter.create<bufferization::ToBufferOp>(
-          returnOp.getLoc(), bufferizedType, returnVal);
+      Value toBufferOp = bufferization::ToBufferOp::create(
+          rewriter, returnOp.getLoc(), bufferizedType, returnVal);
       returnValues.push_back(toBufferOp);
     }
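In conversion and rewrite patterns like the ones above, only the create spelling changes; `replaceOp`, `eraseOp`, and insertion-point handling are untouched. A minimal standalone pattern in the same shape (the pattern itself is illustrative, not part of the patch):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Fold arith.xori %x, %x on i1 into a false constant, creating the
// replacement through the static create API.
struct XorSelfToFalse : OpRewritePattern<arith::XOrIOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(arith::XOrIOp op,
                                PatternRewriter &rewriter) const override {
    if (op.getLhs() != op.getRhs() || !op.getType().isInteger(1))
      return failure();
    Value constFalse = arith::ConstantOp::create(rewriter, op.getLoc(),
                                                 rewriter.getBoolAttr(false));
    rewriter.replaceOp(op, constFalse);
    return success();
  }
};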
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp b/mlir/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
index a611126..f0d65b0 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
@@ -64,8 +64,8 @@ class DeallocOpConversion
     rewriter.replaceOpWithNewOp<scf::IfOp>(
         op, adaptor.getConditions()[0], [&](OpBuilder &builder, Location loc) {
-          builder.create<memref::DeallocOp>(loc, adaptor.getMemrefs()[0]);
-          builder.create<scf::YieldOp>(loc);
+          memref::DeallocOp::create(builder, loc, adaptor.getMemrefs()[0]);
+          scf::YieldOp::create(builder, loc);
         });
     return success();
   }
@@ -108,45 +108,46 @@ class DeallocOpConversion
     // Compute the base pointer indices, compare all retained indices to the
     // memref index to check if they alias.
     SmallVector<Value> doesNotAliasList;
-    Value memrefAsIdx = rewriter.create<memref::ExtractAlignedPointerAsIndexOp>(
-        op->getLoc(), adaptor.getMemrefs()[0]);
+    Value memrefAsIdx = memref::ExtractAlignedPointerAsIndexOp::create(
+        rewriter, op->getLoc(), adaptor.getMemrefs()[0]);
     for (Value retained : adaptor.getRetained()) {
-      Value retainedAsIdx =
-          rewriter.create<memref::ExtractAlignedPointerAsIndexOp>(op->getLoc(),
-                                                                  retained);
-      Value doesNotAlias = rewriter.create<arith::CmpIOp>(
-          op->getLoc(), arith::CmpIPredicate::ne, memrefAsIdx, retainedAsIdx);
+      Value retainedAsIdx = memref::ExtractAlignedPointerAsIndexOp::create(
+          rewriter, op->getLoc(), retained);
+      Value doesNotAlias = arith::CmpIOp::create(rewriter, op->getLoc(),
+                                                 arith::CmpIPredicate::ne,
+                                                 memrefAsIdx, retainedAsIdx);
       doesNotAliasList.push_back(doesNotAlias);
     }
     // AND-reduce the list of booleans from above.
     Value prev = doesNotAliasList.front();
     for (Value doesNotAlias : ArrayRef(doesNotAliasList).drop_front())
-      prev = rewriter.create<arith::AndIOp>(op->getLoc(), prev, doesNotAlias);
+      prev = arith::AndIOp::create(rewriter, op->getLoc(), prev, doesNotAlias);
     // Also consider the condition given by the dealloc operation and perform a
     // conditional deallocation guarded by that value.
-    Value shouldDealloc = rewriter.create<arith::AndIOp>(
-        op->getLoc(), prev, adaptor.getConditions()[0]);
+    Value shouldDealloc = arith::AndIOp::create(rewriter, op->getLoc(), prev,
+                                                adaptor.getConditions()[0]);
-    rewriter.create<scf::IfOp>(
-        op.getLoc(), shouldDealloc, [&](OpBuilder &builder, Location loc) {
-          builder.create<memref::DeallocOp>(loc, adaptor.getMemrefs()[0]);
-          builder.create<scf::YieldOp>(loc);
-        });
+    scf::IfOp::create(rewriter, op.getLoc(), shouldDealloc,
+                      [&](OpBuilder &builder, Location loc) {
+                        memref::DeallocOp::create(builder, loc,
+                                                  adaptor.getMemrefs()[0]);
+                        scf::YieldOp::create(builder, loc);
+                      });
     // Compute the replacement values for the dealloc operation results. This
     // inserts an already canonicalized form of
     // `select(does_alias_with_memref(r), memref_cond, false)` for each retained
     // value r.
     SmallVector<Value> replacements;
-    Value trueVal = rewriter.create<arith::ConstantOp>(
-        op->getLoc(), rewriter.getBoolAttr(true));
+    Value trueVal = arith::ConstantOp::create(rewriter, op->getLoc(),
+                                              rewriter.getBoolAttr(true));
     for (Value doesNotAlias : doesNotAliasList) {
       Value aliases =
-          rewriter.create<arith::XOrIOp>(op->getLoc(), doesNotAlias, trueVal);
-      Value result = rewriter.create<arith::AndIOp>(op->getLoc(), aliases,
-                                                    adaptor.getConditions()[0]);
+          arith::XOrIOp::create(rewriter, op->getLoc(), doesNotAlias, trueVal);
+      Value result = arith::AndIOp::create(rewriter, op->getLoc(), aliases,
+                                           adaptor.getConditions()[0]);
       replacements.push_back(result);
     }
@@ -230,108 +231,112 @@ class DeallocOpConversion
     // Without storing them to memrefs, we could not use for-loops but only a
     // completely unrolled version of it, potentially leading to code-size
     // blow-up.
-    Value toDeallocMemref = rewriter.create<memref::AllocOp>(
-        op.getLoc(), MemRefType::get({(int64_t)adaptor.getMemrefs().size()},
-                                     rewriter.getIndexType()));
-    Value conditionMemref = rewriter.create<memref::AllocOp>(
-        op.getLoc(), MemRefType::get({(int64_t)adaptor.getConditions().size()},
-                                     rewriter.getI1Type()));
-    Value toRetainMemref = rewriter.create<memref::AllocOp>(
-        op.getLoc(), MemRefType::get({(int64_t)adaptor.getRetained().size()},
-                                     rewriter.getIndexType()));
+    Value toDeallocMemref = memref::AllocOp::create(
+        rewriter, op.getLoc(),
+        MemRefType::get({(int64_t)adaptor.getMemrefs().size()},
+                        rewriter.getIndexType()));
+    Value conditionMemref = memref::AllocOp::create(
+        rewriter, op.getLoc(),
+        MemRefType::get({(int64_t)adaptor.getConditions().size()},
+                        rewriter.getI1Type()));
+    Value toRetainMemref = memref::AllocOp::create(
+        rewriter, op.getLoc(),
+        MemRefType::get({(int64_t)adaptor.getRetained().size()},
+                        rewriter.getIndexType()));
     auto getConstValue = [&](uint64_t value) -> Value {
-      return rewriter.create<arith::ConstantOp>(op.getLoc(),
-                                                rewriter.getIndexAttr(value));
+      return arith::ConstantOp::create(rewriter, op.getLoc(),
+                                       rewriter.getIndexAttr(value));
     };
     // Extract the base pointers of the memrefs as indices to check for aliasing
     // at runtime.
     for (auto [i, toDealloc] : llvm::enumerate(adaptor.getMemrefs())) {
-      Value memrefAsIdx =
-          rewriter.create<memref::ExtractAlignedPointerAsIndexOp>(op.getLoc(),
-                                                                  toDealloc);
-      rewriter.create<memref::StoreOp>(op.getLoc(), memrefAsIdx,
-                                       toDeallocMemref, getConstValue(i));
+      Value memrefAsIdx = memref::ExtractAlignedPointerAsIndexOp::create(
+          rewriter, op.getLoc(), toDealloc);
+      memref::StoreOp::create(rewriter, op.getLoc(), memrefAsIdx,
+                              toDeallocMemref, getConstValue(i));
     }
     for (auto [i, cond] : llvm::enumerate(adaptor.getConditions()))
-      rewriter.create<memref::StoreOp>(op.getLoc(), cond, conditionMemref,
-                                       getConstValue(i));
+      memref::StoreOp::create(rewriter, op.getLoc(), cond, conditionMemref,
+                              getConstValue(i));
     for (auto [i, toRetain] : llvm::enumerate(adaptor.getRetained())) {
-      Value memrefAsIdx =
-          rewriter.create<memref::ExtractAlignedPointerAsIndexOp>(op.getLoc(),
-                                                                  toRetain);
-      rewriter.create<memref::StoreOp>(op.getLoc(), memrefAsIdx, toRetainMemref,
-                                       getConstValue(i));
+      Value memrefAsIdx = memref::ExtractAlignedPointerAsIndexOp::create(
+          rewriter, op.getLoc(), toRetain);
+      memref::StoreOp::create(rewriter, op.getLoc(), memrefAsIdx,
+                              toRetainMemref, getConstValue(i));
     }
     // Cast the allocated memrefs to dynamic shape because we want only one
     // helper function no matter how many operands the bufferization.dealloc
     // has.
-    Value castedDeallocMemref = rewriter.create<memref::CastOp>(
-        op->getLoc(),
+    Value castedDeallocMemref = memref::CastOp::create(
+        rewriter, op->getLoc(),
         MemRefType::get({ShapedType::kDynamic}, rewriter.getIndexType()),
         toDeallocMemref);
-    Value castedCondsMemref = rewriter.create<memref::CastOp>(
-        op->getLoc(),
+    Value castedCondsMemref = memref::CastOp::create(
+        rewriter, op->getLoc(),
         MemRefType::get({ShapedType::kDynamic}, rewriter.getI1Type()),
         conditionMemref);
-    Value castedRetainMemref = rewriter.create<memref::CastOp>(
-        op->getLoc(),
+    Value castedRetainMemref = memref::CastOp::create(
+        rewriter, op->getLoc(),
         MemRefType::get({ShapedType::kDynamic}, rewriter.getIndexType()),
         toRetainMemref);
-    Value deallocCondsMemref = rewriter.create<memref::AllocOp>(
-        op.getLoc(), MemRefType::get({(int64_t)adaptor.getMemrefs().size()},
-                                     rewriter.getI1Type()));
-    Value retainCondsMemref = rewriter.create<memref::AllocOp>(
-        op.getLoc(), MemRefType::get({(int64_t)adaptor.getRetained().size()},
-                                     rewriter.getI1Type()));
-
-    Value castedDeallocCondsMemref = rewriter.create<memref::CastOp>(
-        op->getLoc(),
+    Value deallocCondsMemref = memref::AllocOp::create(
+        rewriter, op.getLoc(),
+        MemRefType::get({(int64_t)adaptor.getMemrefs().size()},
+                        rewriter.getI1Type()));
+    Value retainCondsMemref = memref::AllocOp::create(
+        rewriter, op.getLoc(),
+        MemRefType::get({(int64_t)adaptor.getRetained().size()},
+                        rewriter.getI1Type()));
+
+    Value castedDeallocCondsMemref = memref::CastOp::create(
+        rewriter, op->getLoc(),
         MemRefType::get({ShapedType::kDynamic}, rewriter.getI1Type()),
         deallocCondsMemref);
-    Value castedRetainCondsMemref = rewriter.create<memref::CastOp>(
-        op->getLoc(),
+    Value castedRetainCondsMemref = memref::CastOp::create(
+        rewriter, op->getLoc(),
         MemRefType::get({ShapedType::kDynamic}, rewriter.getI1Type()),
         retainCondsMemref);
     Operation *symtableOp = op->getParentWithTrait<OpTrait::SymbolTable>();
-    rewriter.create<func::CallOp>(
-        op.getLoc(), deallocHelperFuncMap.lookup(symtableOp),
+    func::CallOp::create(
+        rewriter, op.getLoc(), deallocHelperFuncMap.lookup(symtableOp),
         SmallVector<Value>{castedDeallocMemref, castedRetainMemref,
                            castedCondsMemref, castedDeallocCondsMemref,
                            castedRetainCondsMemref});
     for (unsigned i = 0, e = adaptor.getMemrefs().size(); i < e; ++i) {
       Value idxValue = getConstValue(i);
-      Value shouldDealloc = rewriter.create<memref::LoadOp>(
-          op.getLoc(), deallocCondsMemref, idxValue);
-      rewriter.create<scf::IfOp>(
-          op.getLoc(), shouldDealloc, [&](OpBuilder &builder, Location loc) {
-            builder.create<memref::DeallocOp>(loc, adaptor.getMemrefs()[i]);
-            builder.create<scf::YieldOp>(loc);
-          });
+      Value shouldDealloc = memref::LoadOp::create(
+          rewriter, op.getLoc(), deallocCondsMemref, idxValue);
+      scf::IfOp::create(rewriter, op.getLoc(), shouldDealloc,
+                        [&](OpBuilder &builder, Location loc) {
+                          memref::DeallocOp::create(builder, loc,
+                                                    adaptor.getMemrefs()[i]);
+                          scf::YieldOp::create(builder, loc);
+                        });
     }
     SmallVector<Value> replacements;
     for (unsigned i = 0, e = adaptor.getRetained().size(); i < e; ++i) {
       Value idxValue = getConstValue(i);
-      Value ownership = rewriter.create<memref::LoadOp>(
-          op.getLoc(), retainCondsMemref, idxValue);
+      Value ownership = memref::LoadOp::create(rewriter, op.getLoc(),
+                                               retainCondsMemref, idxValue);
      replacements.push_back(ownership);
     }
     // Deallocate above allocated memrefs again to avoid memory leaks.
     // Deallocation will not be run on code after this stage.
-    rewriter.create<memref::DeallocOp>(op.getLoc(), toDeallocMemref);
-    rewriter.create<memref::DeallocOp>(op.getLoc(), toRetainMemref);
-    rewriter.create<memref::DeallocOp>(op.getLoc(), conditionMemref);
-    rewriter.create<memref::DeallocOp>(op.getLoc(), deallocCondsMemref);
-    rewriter.create<memref::DeallocOp>(op.getLoc(), retainCondsMemref);
+    memref::DeallocOp::create(rewriter, op.getLoc(), toDeallocMemref);
+    memref::DeallocOp::create(rewriter, op.getLoc(), toRetainMemref);
+    memref::DeallocOp::create(rewriter, op.getLoc(), conditionMemref);
+    memref::DeallocOp::create(rewriter, op.getLoc(), deallocCondsMemref);
+    memref::DeallocOp::create(rewriter, op.getLoc(), retainCondsMemref);
     rewriter.replaceOp(op, replacements);
     return success();
@@ -349,8 +354,8 @@ public:
                   ConversionPatternRewriter &rewriter) const override {
     // Lower the trivial case.
     if (adaptor.getMemrefs().empty()) {
-      Value falseVal = rewriter.create<arith::ConstantOp>(
-          op.getLoc(), rewriter.getBoolAttr(false));
+      Value falseVal = arith::ConstantOp::create(rewriter, op.getLoc(),
+                                                 rewriter.getBoolAttr(false));
       rewriter.replaceOp(
           op, SmallVector<Value>(adaptor.getRetained().size(), falseVal));
       return success();
     }
@@ -449,30 +454,31 @@ func::FuncOp mlir::bufferization::buildDeallocationLibraryFunction(
   Value retainCondsMemref = helperFuncOp.getArguments()[4];
   // Insert some prerequisites.
-  Value c0 = builder.create<arith::ConstantOp>(loc, builder.getIndexAttr(0));
-  Value c1 = builder.create<arith::ConstantOp>(loc, builder.getIndexAttr(1));
+  Value c0 = arith::ConstantOp::create(builder, loc, builder.getIndexAttr(0));
+  Value c1 = arith::ConstantOp::create(builder, loc, builder.getIndexAttr(1));
   Value trueValue =
-      builder.create<arith::ConstantOp>(loc, builder.getBoolAttr(true));
+      arith::ConstantOp::create(builder, loc, builder.getBoolAttr(true));
   Value falseValue =
-      builder.create<arith::ConstantOp>(loc, builder.getBoolAttr(false));
-  Value toDeallocSize = builder.create<memref::DimOp>(loc, toDeallocMemref, c0);
-  Value toRetainSize = builder.create<memref::DimOp>(loc, toRetainMemref, c0);
+      arith::ConstantOp::create(builder, loc, builder.getBoolAttr(false));
+  Value toDeallocSize =
+      memref::DimOp::create(builder, loc, toDeallocMemref, c0);
+  Value toRetainSize = memref::DimOp::create(builder, loc, toRetainMemref, c0);
-  builder.create<scf::ForOp>(
-      loc, c0, toRetainSize, c1, ValueRange(),
+  scf::ForOp::create(
+      builder, loc, c0, toRetainSize, c1, ValueRange(),
       [&](OpBuilder &builder, Location loc, Value i, ValueRange iterArgs) {
-        builder.create<memref::StoreOp>(loc, falseValue, retainCondsMemref, i);
-        builder.create<scf::YieldOp>(loc);
+        memref::StoreOp::create(builder, loc, falseValue, retainCondsMemref, i);
+        scf::YieldOp::create(builder, loc);
       });
-  builder.create<scf::ForOp>(
-      loc, c0, toDeallocSize, c1, ValueRange(),
+  scf::ForOp::create(
+      builder, loc, c0, toDeallocSize, c1, ValueRange(),
       [&](OpBuilder &builder, Location loc, Value outerIter,
           ValueRange iterArgs) {
         Value toDealloc =
-            builder.create<memref::LoadOp>(loc, toDeallocMemref, outerIter);
+            memref::LoadOp::create(builder, loc, toDeallocMemref, outerIter);
         Value cond =
-            builder.create<memref::LoadOp>(loc, conditionMemref, outerIter);
+            memref::LoadOp::create(builder, loc, conditionMemref, outerIter);
         // Build the first for loop that computes aliasing with retained
         // memrefs.
@@ -482,31 +488,29 @@ func::FuncOp mlir::bufferization::buildDeallocationLibraryFunction(
                 loc, c0, toRetainSize, c1, trueValue,
                 [&](OpBuilder &builder, Location loc, Value i,
                     ValueRange iterArgs) {
-                  Value retainValue = builder.create<memref::LoadOp>(
-                      loc, toRetainMemref, i);
-                  Value doesAlias = builder.create<arith::CmpIOp>(
-                      loc, arith::CmpIPredicate::eq, retainValue,
+                  Value retainValue = memref::LoadOp::create(
+                      builder, loc, toRetainMemref, i);
+                  Value doesAlias = arith::CmpIOp::create(
+                      builder, loc, arith::CmpIPredicate::eq, retainValue,
                       toDealloc);
-                  builder.create<scf::IfOp>(
-                      loc, doesAlias,
+                  scf::IfOp::create(
+                      builder, loc, doesAlias,
                       [&](OpBuilder &builder, Location loc) {
-                        Value retainCondValue =
-                            builder.create<memref::LoadOp>(
-                                loc, retainCondsMemref, i);
-                        Value aggregatedRetainCond =
-                            builder.create<arith::OrIOp>(
-                                loc, retainCondValue, cond);
-                        builder.create<memref::StoreOp>(
-                            loc, aggregatedRetainCond, retainCondsMemref,
-                            i);
-                        builder.create<scf::YieldOp>(loc);
+                        Value retainCondValue = memref::LoadOp::create(
+                            builder, loc, retainCondsMemref, i);
+                        Value aggregatedRetainCond = arith::OrIOp::create(
+                            builder, loc, retainCondValue, cond);
+                        memref::StoreOp::create(builder, loc,
+                                                aggregatedRetainCond,
+                                                retainCondsMemref, i);
+                        scf::YieldOp::create(builder, loc);
                       });
-                  Value doesntAlias = builder.create<arith::CmpIOp>(
-                      loc, arith::CmpIPredicate::ne, retainValue,
+                  Value doesntAlias = arith::CmpIOp::create(
+                      builder, loc, arith::CmpIPredicate::ne, retainValue,
                       toDealloc);
-                  Value yieldValue = builder.create<arith::AndIOp>(
-                      loc, iterArgs[0], doesntAlias);
-                  builder.create<scf::YieldOp>(loc, yieldValue);
+                  Value yieldValue = arith::AndIOp::create(
+                      builder, loc, iterArgs[0], doesntAlias);
+                  scf::YieldOp::create(builder, loc, yieldValue);
                 })
             .getResult(0);
@@ -518,24 +522,24 @@ func::FuncOp mlir::bufferization::buildDeallocationLibraryFunction(
                 loc, c0, outerIter, c1, noRetainAlias,
                 [&](OpBuilder &builder, Location loc, Value i,
                     ValueRange iterArgs) {
-                  Value prevDeallocValue = builder.create<memref::LoadOp>(
-                      loc, toDeallocMemref, i);
-                  Value doesntAlias = builder.create<arith::CmpIOp>(
-                      loc, arith::CmpIPredicate::ne, prevDeallocValue,
-                      toDealloc);
-                  Value yieldValue = builder.create<arith::AndIOp>(
-                      loc, iterArgs[0], doesntAlias);
-                  builder.create<scf::YieldOp>(loc, yieldValue);
+                  Value prevDeallocValue = memref::LoadOp::create(
+                      builder, loc, toDeallocMemref, i);
+                  Value doesntAlias = arith::CmpIOp::create(
+                      builder, loc, arith::CmpIPredicate::ne,
+                      prevDeallocValue, toDealloc);
+                  Value yieldValue = arith::AndIOp::create(
+                      builder, loc, iterArgs[0], doesntAlias);
+                  scf::YieldOp::create(builder, loc, yieldValue);
                 })
             .getResult(0);
-        Value shouldDealoc = builder.create<arith::AndIOp>(loc, noAlias, cond);
-        builder.create<memref::StoreOp>(loc, shouldDealoc, deallocCondsMemref,
-                                        outerIter);
-        builder.create<scf::YieldOp>(loc);
+        Value shouldDealoc = arith::AndIOp::create(builder, loc, noAlias, cond);
+        memref::StoreOp::create(builder, loc, shouldDealoc, deallocCondsMemref,
+                                outerIter);
+        scf::YieldOp::create(builder, loc);
       });
-  builder.create<func::ReturnOp>(loc);
+  func::ReturnOp::create(builder, loc);
   return helperFuncOp;
 }
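The helper above drives `scf::ForOp::create` with a body-builder callback: the callback receives the loop's builder, location, induction variable, and iter args, and must emit the terminator itself. A self-contained sketch of that shape (the loop bounds and stored value are illustrative, not from the patch):

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"

using namespace mlir;

// Fill buf[0..size) with `false` using an scf.for built via the static
// create API, mirroring the initialization loop in the helper function.
static void fillWithFalse(OpBuilder &builder, Location loc, Value buf,
                          Value size) {
  Value c0 = arith::ConstantIndexOp::create(builder, loc, 0);
  Value c1 = arith::ConstantIndexOp::create(builder, loc, 1);
  Value falseValue =
      arith::ConstantOp::create(builder, loc, builder.getBoolAttr(false));
  scf::ForOp::create(
      builder, loc, c0, size, c1, ValueRange(),
      [&](OpBuilder &b, Location nestedLoc, Value iv, ValueRange iterArgs) {
        memref::StoreOp::create(b, nestedLoc, falseValue, buf, iv);
        scf::YieldOp::create(b, nestedLoc);
      });
}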
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp
index 1eeafc4..64c178d 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp
@@ -43,7 +43,7 @@ using namespace mlir::bufferization;
 //===----------------------------------------------------------------------===//
 static Value buildBoolValue(OpBuilder &builder, Location loc, bool value) {
-  return builder.create<arith::ConstantOp>(loc, builder.getBoolAttr(value));
+  return arith::ConstantOp::create(builder, loc, builder.getBoolAttr(value));
 }
 static bool isMemref(Value v) { return isa<BaseMemRefType>(v.getType()); }
@@ -750,19 +750,18 @@ Value BufferDeallocation::materializeMemrefWithGuaranteedOwnership(
   // Insert a runtime check and only clone if we still don't have ownership at
   // runtime.
-  Value maybeClone =
-      builder
-          .create<scf::IfOp>(
-              memref.getLoc(), condition,
-              [&](OpBuilder &builder, Location loc) {
-                builder.create<scf::YieldOp>(loc, newMemref);
-              },
-              [&](OpBuilder &builder, Location loc) {
-                Value clone =
-                    builder.create<bufferization::CloneOp>(loc, newMemref);
-                builder.create<scf::YieldOp>(loc, clone);
-              })
-          .getResult(0);
+  Value maybeClone = builder
+                         .create<scf::IfOp>(
+                             memref.getLoc(), condition,
+                             [&](OpBuilder &builder, Location loc) {
+                               scf::YieldOp::create(builder, loc, newMemref);
+                             },
+                             [&](OpBuilder &builder, Location loc) {
+                               Value clone = bufferization::CloneOp::create(
+                                   builder, loc, newMemref);
+                               scf::YieldOp::create(builder, loc, clone);
+                             })
+                         .getResult(0);
   Value trueVal = buildBoolValue(builder, memref.getLoc(), true);
   state.updateOwnership(maybeClone, trueVal);
   state.addMemrefToDeallocate(maybeClone, maybeClone.getParentBlock());
@@ -797,8 +796,8 @@ BufferDeallocation::handleInterface(BranchOpInterface op) {
   state.getMemrefsToRetain(block, op->getSuccessor(0), forwardedOperands,
                            toRetain);
-  auto deallocOp = builder.create<bufferization::DeallocOp>(
-      op.getLoc(), memrefs, conditions, toRetain);
+  auto deallocOp = bufferization::DeallocOp::create(
+      builder, op.getLoc(), memrefs, conditions, toRetain);
   // We want to replace the current ownership of the retained values with the
   // result values of the dealloc operation as they are always unique.
@@ -885,12 +884,11 @@ BufferDeallocation::handleInterface(MemoryEffectOpInterface op) {
       builder.setInsertionPoint(op);
       Ownership ownership = state.getOwnership(operand, block);
       if (ownership.isUnique()) {
-        Value ownershipInverted = builder.create<arith::XOrIOp>(
-            op.getLoc(), ownership.getIndicator(),
+        Value ownershipInverted = arith::XOrIOp::create(
+            builder, op.getLoc(), ownership.getIndicator(),
             buildBoolValue(builder, op.getLoc(), true));
-        builder.create<cf::AssertOp>(
-            op.getLoc(), ownershipInverted,
-            "expected that the block does not have ownership");
+        cf::AssertOp::create(builder, op.getLoc(), ownershipInverted,
+                             "expected that the block does not have ownership");
       }
     }
   }