Diffstat (limited to 'flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp')
-rw-r--r-- | flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp | 250
1 file changed, 127 insertions(+), 123 deletions(-)
diff --git a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
index db12c84..03952da 100644
--- a/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
+++ b/flang/lib/Optimizer/Builder/PPCIntrinsicCall.cpp
@@ -1091,7 +1091,7 @@ void PPCIntrinsicLibrary::genMtfsf(llvm::ArrayRef<fir::ExtendedValue> args) {
         builder.getContext(), builder);
     funcOp = builder.createFunction(loc, "llvm.ppc.mtfsf", libFuncType);
   }
-  builder.create<fir::CallOp>(loc, funcOp, scalarArgs);
+  fir::CallOp::create(builder, loc, funcOp, scalarArgs);
 }

 // VEC_ABS
@@ -1118,7 +1118,7 @@ PPCIntrinsicLibrary::genVecAbs(mlir::Type resultType,
     }
     funcOp = builder.createFunction(loc, fname, ftype);

-    auto callOp{builder.create<fir::CallOp>(loc, funcOp, argBases[0])};
+    auto callOp{fir::CallOp::create(builder, loc, funcOp, argBases[0])};
     return callOp.getResult(0);
   } else if (auto eleTy = mlir::dyn_cast<mlir::IntegerType>(vTypeInfo.eleTy)) {
     // vec_abs(arg1) = max(0 - arg1, arg1)
@@ -1128,8 +1128,8 @@ PPCIntrinsicLibrary::genVecAbs(mlir::Type resultType,
     // construct vector(0,..)
     auto zeroVal{builder.createIntegerConstant(loc, eleTy, 0)};
     auto vZero{
-        builder.create<mlir::vector::BroadcastOp>(loc, newVecTy, zeroVal)};
-    auto zeroSubVarg1{builder.create<mlir::arith::SubIOp>(loc, vZero, varg1)};
+        mlir::vector::BroadcastOp::create(builder, loc, newVecTy, zeroVal)};
+    auto zeroSubVarg1{mlir::arith::SubIOp::create(builder, loc, vZero, varg1)};

     mlir::func::FuncOp funcOp{nullptr};
     switch (eleTy.getWidth()) {
@@ -1159,7 +1159,7 @@ PPCIntrinsicLibrary::genVecAbs(mlir::Type resultType,
     funcOp = builder.createFunction(loc, fname, ftype);

     mlir::Value args[] = {zeroSubVarg1, varg1};
-    auto callOp{builder.create<fir::CallOp>(loc, funcOp, args)};
+    auto callOp{fir::CallOp::create(builder, loc, funcOp, args)};
     return builder.createConvert(loc, argBases[0].getType(),
                                  callOp.getResult(0));
   }
@@ -1189,21 +1189,21 @@ fir::ExtendedValue PPCIntrinsicLibrary::genVecAddAndMulSubXor(
   switch (vop) {
   case VecOp::Add:
     if (isInteger)
-      r = builder.create<mlir::arith::AddIOp>(loc, vargs[0], vargs[1]);
+      r = mlir::arith::AddIOp::create(builder, loc, vargs[0], vargs[1]);
     else if (isFloat)
-      r = builder.create<mlir::arith::AddFOp>(loc, vargs[0], vargs[1]);
+      r = mlir::arith::AddFOp::create(builder, loc, vargs[0], vargs[1]);
     break;
   case VecOp::Mul:
     if (isInteger)
-      r = builder.create<mlir::arith::MulIOp>(loc, vargs[0], vargs[1]);
+      r = mlir::arith::MulIOp::create(builder, loc, vargs[0], vargs[1]);
     else if (isFloat)
-      r = builder.create<mlir::arith::MulFOp>(loc, vargs[0], vargs[1]);
+      r = mlir::arith::MulFOp::create(builder, loc, vargs[0], vargs[1]);
     break;
   case VecOp::Sub:
     if (isInteger)
-      r = builder.create<mlir::arith::SubIOp>(loc, vargs[0], vargs[1]);
+      r = mlir::arith::SubIOp::create(builder, loc, vargs[0], vargs[1]);
     else if (isFloat)
-      r = builder.create<mlir::arith::SubFOp>(loc, vargs[0], vargs[1]);
+      r = mlir::arith::SubFOp::create(builder, loc, vargs[0], vargs[1]);
     break;
   case VecOp::And:
   case VecOp::Xor: {
@@ -1217,16 +1217,16 @@ fir::ExtendedValue PPCIntrinsicLibrary::genVecAddAndMulSubXor(
       auto wd{mlir::dyn_cast<mlir::FloatType>(vecTyInfo.eleTy).getWidth()};
       auto ftype{builder.getIntegerType(wd)};
       auto bcVecTy{mlir::VectorType::get(vecTyInfo.len, ftype)};
-      arg1 = builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, vargs[0]);
-      arg2 = builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, vargs[1]);
+      arg1 = mlir::vector::BitCastOp::create(builder, loc, bcVecTy, vargs[0]);
+      arg2 = mlir::vector::BitCastOp::create(builder, loc, bcVecTy, vargs[1]);
     }
     if (vop == VecOp::And)
-      r = builder.create<mlir::arith::AndIOp>(loc, arg1, arg2);
+      r = mlir::arith::AndIOp::create(builder, loc, arg1, arg2);
     else if (vop == VecOp::Xor)
-      r = builder.create<mlir::arith::XOrIOp>(loc, arg1, arg2);
+      r = mlir::arith::XOrIOp::create(builder, loc, arg1, arg2);

     if (isFloat)
-      r = builder.create<mlir::vector::BitCastOp>(loc, vargs[0].getType(), r);
+      r = mlir::vector::BitCastOp::create(builder, loc, vargs[0].getType(), r);

     break;
   }
@@ -1342,7 +1342,7 @@ PPCIntrinsicLibrary::genVecAnyCompare(mlir::Type resultType,
   assert((!fname.empty() && ftype) && "invalid type");

   mlir::func::FuncOp funcOp{builder.createFunction(loc, fname, ftype)};
-  auto callOp{builder.create<fir::CallOp>(loc, funcOp, cmpArgs)};
+  auto callOp{fir::CallOp::create(builder, loc, funcOp, cmpArgs)};
   return callOp.getResult(0);
 }

@@ -1473,7 +1473,7 @@ PPCIntrinsicLibrary::genVecCmp(mlir::Type resultType,
       // arg1 < arg2 --> vcmpgt(arg2, arg1)
       mlir::Value vargs[]{argBases[argOrder[vop][0]],
                          argBases[argOrder[vop][1]]};
-      auto callOp{builder.create<fir::CallOp>(loc, funcOp, vargs)};
+      auto callOp{fir::CallOp::create(builder, loc, funcOp, vargs)};
       res = callOp.getResult(0);
       break;
     }
@@ -1487,14 +1487,15 @@ PPCIntrinsicLibrary::genVecCmp(mlir::Type resultType,
       // Construct a constant vector(-1)
       auto negOneVal{builder.createIntegerConstant(
           loc, getConvertedElementType(context, eTy), -1)};
-      auto vNegOne{builder.create<mlir::vector::BroadcastOp>(
-          loc, vecTyInfo.toMlirVectorType(context), negOneVal)};
+      auto vNegOne{mlir::vector::BroadcastOp::create(
+          builder, loc, vecTyInfo.toMlirVectorType(context), negOneVal)};

-      auto callOp{builder.create<fir::CallOp>(loc, funcOp, vargs)};
+      auto callOp{fir::CallOp::create(builder, loc, funcOp, vargs)};
       mlir::Value callRes{callOp.getResult(0)};
       auto vargs2{
           convertVecArgs(builder, loc, vecTyInfo, mlir::ValueRange{callRes})};
-      auto xorRes{builder.create<mlir::arith::XOrIOp>(loc, vargs2[0], vNegOne)};
+      auto xorRes{
+          mlir::arith::XOrIOp::create(builder, loc, vargs2[0], vNegOne)};

       res = builder.createConvert(loc, returnType, xorRes);
       break;
@@ -1519,7 +1520,7 @@ PPCIntrinsicLibrary::genVecCmp(mlir::Type resultType,
     default:
       llvm_unreachable("Invalid vector operation for generator");
     }
-    auto callOp{builder.create<fir::CallOp>(loc, funcOp, vargs)};
+    auto callOp{fir::CallOp::create(builder, loc, funcOp, vargs)};
     res = callOp.getResult(0);
   } else
     llvm_unreachable("invalid vector type");
@@ -1535,13 +1536,13 @@ static inline mlir::Value swapVectorWordPairs(fir::FirOpBuilder &builder,
   auto vtype{mlir::VectorType::get(16, mlir::IntegerType::get(context, 8))};

   if (ty != vtype)
-    arg = builder.create<mlir::LLVM::BitcastOp>(loc, vtype, arg).getResult();
+    arg = mlir::LLVM::BitcastOp::create(builder, loc, vtype, arg).getResult();

   llvm::SmallVector<int64_t, 16> mask{4,  5,  6,  7,  0, 1, 2,  3,
                                       12, 13, 14, 15, 8, 9, 10, 11};
-  arg = builder.create<mlir::vector::ShuffleOp>(loc, arg, arg, mask);
+  arg = mlir::vector::ShuffleOp::create(builder, loc, arg, arg, mask);

   if (ty != vtype)
-    arg = builder.create<mlir::LLVM::BitcastOp>(loc, ty, arg);
+    arg = mlir::LLVM::BitcastOp::create(builder, loc, ty, arg);
   return arg;
 }
@@ -1576,7 +1577,7 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType,
                            : "llvm.ppc.altivec.vcfsx"};
       auto funcOp{builder.createFunction(loc, fname, ftype)};
       mlir::Value newArgs[] = {argBases[0], convArg};
-      auto callOp{builder.create<fir::CallOp>(loc, funcOp, newArgs)};
+      auto callOp{fir::CallOp::create(builder, loc, funcOp, newArgs)};

       return callOp.getResult(0);
     } else if (width == 64) {
@@ -1585,8 +1586,8 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType,

       // vec_vtf(arg1, arg2) = fmul(1.0 / (1 << arg2), llvm.sitofp(arg1))
       auto convOp{(isUnsigned)
-                      ? builder.create<mlir::LLVM::UIToFPOp>(loc, ty, vArg1)
-                      : builder.create<mlir::LLVM::SIToFPOp>(loc, ty, vArg1)};
+                      ? mlir::LLVM::UIToFPOp::create(builder, loc, ty, vArg1)
+                      : mlir::LLVM::SIToFPOp::create(builder, loc, ty, vArg1)};

       // construct vector<1./(1<<arg1), 1.0/(1<<arg1)>
       auto constInt{mlir::dyn_cast_or_null<mlir::IntegerAttr>(
@@ -1595,11 +1596,11 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType,
       assert(constInt && "expected integer constant argument");
       double f{1.0 / (1 << constInt.getInt())};
       llvm::SmallVector<double> vals{f, f};
-      auto constOp{builder.create<mlir::arith::ConstantOp>(
-          loc, ty, builder.getF64VectorAttr(vals))};
+      auto constOp{mlir::arith::ConstantOp::create(
+          builder, loc, ty, builder.getF64VectorAttr(vals))};

-      auto mulOp{builder.create<mlir::LLVM::FMulOp>(
-          loc, ty, convOp->getResult(0), constOp)};
+      auto mulOp{mlir::LLVM::FMulOp::create(builder, loc, ty,
+                                            convOp->getResult(0), constOp)};

       return builder.createConvert(loc, fir::VectorType::get(2, fTy), mulOp);
     }
@@ -1613,7 +1614,7 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType,
     auto firTy{resTyInfo.toFirVectorType()};

     // vec_convert(v, mold) = bitcast v to "type of mold"
-    auto conv{builder.create<mlir::LLVM::BitcastOp>(loc, moldTy, vArg1)};
+    auto conv{mlir::LLVM::BitcastOp::create(builder, loc, moldTy, vArg1)};

     return builder.createConvert(loc, firTy, conv);
   }
@@ -1629,7 +1630,7 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType,
       auto ftype{
           genFuncType<Ty::RealVector<8>, Ty::RealVector<4>>(context, builder)};
       auto funcOp{builder.createFunction(loc, fname, ftype)};
-      auto callOp{builder.create<fir::CallOp>(loc, funcOp, newArgs)};
+      auto callOp{fir::CallOp::create(builder, loc, funcOp, newArgs)};

       return callOp.getResult(0);
     } else if (vecTyInfo.isFloat64()) {
@@ -1638,7 +1639,7 @@ PPCIntrinsicLibrary::genVecConvert(mlir::Type resultType,
           genFuncType<Ty::RealVector<4>, Ty::RealVector<8>>(context, builder)};
       auto funcOp{builder.createFunction(loc, fname, ftype)};
       newArgs[0] =
-          builder.create<fir::CallOp>(loc, funcOp, newArgs).getResult(0);
+          fir::CallOp::create(builder, loc, funcOp, newArgs).getResult(0);
       auto fvf32Ty{newArgs[0].getType()};
       auto f32type{mlir::Float32Type::get(context)};
       auto mvf32Ty{mlir::VectorType::get(4, f32type)};
@@ -1662,7 +1663,7 @@ static mlir::Value convertVectorElementOrder(fir::FirOpBuilder &builder,
                                              mlir::Value idx) {
   mlir::Value numSub1{
       builder.createIntegerConstant(loc, idx.getType(), vecInfo.len - 1)};
-  return builder.create<mlir::LLVM::SubOp>(loc, idx.getType(), numSub1, idx);
+  return mlir::LLVM::SubOp::create(builder, loc, idx.getType(), numSub1, idx);
 }

 // VEC_EXTRACT
@@ -1681,14 +1682,14 @@ PPCIntrinsicLibrary::genVecExtract(mlir::Type resultType,
   // position
   auto numEle{builder.createIntegerConstant(loc, argTypes[1], vecTyInfo.len)};
   mlir::Value uremOp{
-      builder.create<mlir::LLVM::URemOp>(loc, argBases[1], numEle)};
+      mlir::LLVM::URemOp::create(builder, loc, argBases[1], numEle)};

   if (!isNativeVecElemOrderOnLE())
     uremOp = convertVectorElementOrder(builder, loc, vecTyInfo, uremOp);

   mlir::Value index = builder.createOrFold<mlir::index::CastUOp>(
       loc, builder.getIndexType(), uremOp);
-  return builder.create<mlir::vector::ExtractOp>(loc, varg0, index);
+  return mlir::vector::ExtractOp::create(builder, loc, varg0, index);
 }

 // VEC_INSERT
@@ -1704,7 +1705,7 @@ PPCIntrinsicLibrary::genVecInsert(mlir::Type resultType,

   auto numEle{builder.createIntegerConstant(loc, argTypes[2], vecTyInfo.len)};
   mlir::Value uremOp{
-      builder.create<mlir::LLVM::URemOp>(loc, argBases[2], numEle)};
+      mlir::LLVM::URemOp::create(builder, loc, argBases[2], numEle)};

   if (!isNativeVecElemOrderOnLE())
     uremOp = convertVectorElementOrder(builder, loc, vecTyInfo, uremOp);
@@ -1712,8 +1713,8 @@ PPCIntrinsicLibrary::genVecInsert(mlir::Type resultType,
   mlir::Value index = builder.createOrFold<mlir::index::CastUOp>(
       loc, builder.getIndexType(), uremOp);
   mlir::Value res =
-      builder.create<mlir::vector::InsertOp>(loc, argBases[0], varg1, index);
-  return builder.create<fir::ConvertOp>(loc, vecTyInfo.toFirVectorType(), res);
+      mlir::vector::InsertOp::create(builder, loc, argBases[0], varg1, index);
+  return fir::ConvertOp::create(builder, loc, vecTyInfo.toFirVectorType(), res);
 }

 // VEC_MERGEH, VEC_MERGEL
@@ -1799,8 +1800,8 @@ PPCIntrinsicLibrary::genVecMerge(mlir::Type resultType,
   llvm::SmallVector<int64_t, 16> &mergeMask =
       (isBEVecElemOrderOnLE()) ? rMask : mMask;

-  auto callOp{builder.create<mlir::vector::ShuffleOp>(loc, vargs[0], vargs[1],
-                                                      mergeMask)};
+  auto callOp{mlir::vector::ShuffleOp::create(builder, loc, vargs[0], vargs[1],
+                                              mergeMask)};
   return builder.createConvert(loc, resultType, callOp);
 }

@@ -1812,9 +1813,9 @@ static mlir::Value addOffsetToAddress(fir::FirOpBuilder &builder,
   auto arrRefTy{builder.getRefType(fir::SequenceType::get(
       {typeExtent}, mlir::IntegerType::get(builder.getContext(), 8)))};
   // Convert arg to !fir.ref<!ref.array<?xi8>>
-  auto resAddr{builder.create<fir::ConvertOp>(loc, arrRefTy, baseAddr)};
+  auto resAddr{fir::ConvertOp::create(builder, loc, arrRefTy, baseAddr)};

-  return builder.create<fir::CoordinateOp>(loc, arrRefTy, resAddr, offset);
+  return fir::CoordinateOp::create(builder, loc, arrRefTy, resAddr, offset);
 }

 static mlir::Value reverseVectorElements(fir::FirOpBuilder &builder,
@@ -1826,8 +1827,8 @@ static mlir::Value reverseVectorElements(fir::FirOpBuilder &builder,
   for (int64_t i = 0; i < len; ++i) {
     mask.push_back(len - 1 - i);
   }
-  auto undefVec{builder.create<fir::UndefOp>(loc, v.getType())};
-  return builder.create<mlir::vector::ShuffleOp>(loc, v, undefVec, mask);
+  auto undefVec{fir::UndefOp::create(builder, loc, v.getType())};
+  return mlir::vector::ShuffleOp::create(builder, loc, v, undefVec, mask);
 }

 static mlir::NamedAttribute getAlignmentAttr(fir::FirOpBuilder &builder,
@@ -1876,8 +1877,8 @@ fir::ExtendedValue PPCIntrinsicLibrary::genVecLdNoCallGrp(
   const auto triple{fir::getTargetTriple(builder.getModule())};

   // Need to get align 1.
-  auto result{builder.create<fir::LoadOp>(loc, mlirTy, addr,
-                                          getAlignmentAttr(builder, 1))};
+  auto result{fir::LoadOp::create(builder, loc, mlirTy, addr,
+                                  getAlignmentAttr(builder, 1))};
   if ((vop == VecOp::Xl && isBEVecElemOrderOnLE()) ||
       (vop == VecOp::Xlbe && triple.isLittleEndian()))
     return builder.createConvert(
@@ -1970,13 +1971,13 @@ PPCIntrinsicLibrary::genVecLdCallGrp(mlir::Type resultType,
       mlir::FunctionType::get(context, {addr.getType()}, {intrinResTy})};
   auto funcOp{builder.createFunction(loc, fname, funcType)};
   auto result{
-      builder.create<fir::CallOp>(loc, funcOp, parsedArgs).getResult(0)};
+      fir::CallOp::create(builder, loc, funcOp, parsedArgs).getResult(0)};

   if (vop == VecOp::Lxvp)
     return result;

   if (intrinResTy != mlirTy)
-    result = builder.create<mlir::vector::BitCastOp>(loc, mlirTy, result);
+    result = mlir::vector::BitCastOp::create(builder, loc, mlirTy, result);

   if (vop != VecOp::Xld2 && vop != VecOp::Xlw4 && isBEVecElemOrderOnLE())
     return builder.createConvert(
@@ -2003,13 +2004,13 @@ PPCIntrinsicLibrary::genVecLvsGrp(mlir::Type resultType,
   // Convert arg0 to i64 type if needed
   auto i64ty{mlir::IntegerType::get(context, 64)};
   if (arg0.getType() != i64ty)
-    arg0 = builder.create<fir::ConvertOp>(loc, i64ty, arg0);
+    arg0 = fir::ConvertOp::create(builder, loc, i64ty, arg0);

   // offset is modulo 16, so shift left 56 bits and then right 56 bits to clear
   // upper 56 bit while preserving sign
   auto shiftVal{builder.createIntegerConstant(loc, i64ty, 56)};
-  auto offset{builder.create<mlir::arith::ShLIOp>(loc, arg0, shiftVal)};
-  auto offset2{builder.create<mlir::arith::ShRSIOp>(loc, offset, shiftVal)};
+  auto offset{mlir::arith::ShLIOp::create(builder, loc, arg0, shiftVal)};
+  auto offset2{mlir::arith::ShRSIOp::create(builder, loc, offset, shiftVal)};

   // Add the offsetArg to %addr of arg1
   auto addr{addOffsetToAddress(builder, loc, arg1, offset2)};
@@ -2029,7 +2030,7 @@ PPCIntrinsicLibrary::genVecLvsGrp(mlir::Type resultType,
   auto funcType{mlir::FunctionType::get(context, {addr.getType()}, {mlirTy})};
   auto funcOp{builder.createFunction(loc, fname, funcType)};
   auto result{
-      builder.create<fir::CallOp>(loc, funcOp, parsedArgs).getResult(0)};
+      fir::CallOp::create(builder, loc, funcOp, parsedArgs).getResult(0)};

   if (isNativeVecElemOrderOnLE())
     return builder.createConvert(
@@ -2066,19 +2067,19 @@ PPCIntrinsicLibrary::genVecNmaddMsub(mlir::Type resultType,
                                       std::get<1>(fmaMap[width]))};
   if (vop == VecOp::Nmadd) {
     // vec_nmadd(arg1, arg2, arg3) = -fma(arg1, arg2, arg3)
-    auto callOp{builder.create<fir::CallOp>(loc, funcOp, newArgs)};
+    auto callOp{fir::CallOp::create(builder, loc, funcOp, newArgs)};

     // We need to convert fir.vector to MLIR vector to use fneg and then back
     // to fir.vector to store.
     auto vCall{builder.createConvert(loc, vTypeInfo.toMlirVectorType(context),
                                      callOp.getResult(0))};
-    auto neg{builder.create<mlir::arith::NegFOp>(loc, vCall)};
+    auto neg{mlir::arith::NegFOp::create(builder, loc, vCall)};
     return builder.createConvert(loc, vTypeInfo.toFirVectorType(), neg);
   } else if (vop == VecOp::Msub) {
     // vec_msub(arg1, arg2, arg3) = fma(arg1, arg2, -arg3)
-    newArgs[2] = builder.create<mlir::arith::NegFOp>(loc, newArgs[2]);
+    newArgs[2] = mlir::arith::NegFOp::create(builder, loc, newArgs[2]);

-    auto callOp{builder.create<fir::CallOp>(loc, funcOp, newArgs)};
+    auto callOp{fir::CallOp::create(builder, loc, funcOp, newArgs)};
     return callOp.getResult(0);
   }
   llvm_unreachable("Invalid vector operation for generator");
@@ -2109,10 +2110,10 @@ PPCIntrinsicLibrary::genVecPerm(mlir::Type resultType,
     auto mMask{builder.createConvert(loc, mlirMaskTy, argBases[2])};

     if (mlirTy != vi32Ty) {
-      mArg0 =
-          builder.create<mlir::LLVM::BitcastOp>(loc, vi32Ty, mArg0).getResult();
-      mArg1 =
-          builder.create<mlir::LLVM::BitcastOp>(loc, vi32Ty, mArg1).getResult();
+      mArg0 = mlir::LLVM::BitcastOp::create(builder, loc, vi32Ty, mArg0)
+                  .getResult();
+      mArg1 = mlir::LLVM::BitcastOp::create(builder, loc, vi32Ty, mArg1)
+                  .getResult();
     }

     auto funcOp{builder.createFunction(
@@ -2127,23 +2128,23 @@ PPCIntrinsicLibrary::genVecPerm(mlir::Type resultType,
       auto v8Ty{mlir::VectorType::get(16, i8Ty)};
       auto negOne{builder.createMinusOneInteger(loc, i8Ty)};
       auto vNegOne{
-          builder.create<mlir::vector::BroadcastOp>(loc, v8Ty, negOne)};
+          mlir::vector::BroadcastOp::create(builder, loc, v8Ty, negOne)};

-      mMask = builder.create<mlir::arith::XOrIOp>(loc, mMask, vNegOne);
+      mMask = mlir::arith::XOrIOp::create(builder, loc, mMask, vNegOne);

       newArgs = {mArg1, mArg0, mMask};
     } else {
       newArgs = {mArg0, mArg1, mMask};
     }

-    auto res{builder.create<fir::CallOp>(loc, funcOp, newArgs).getResult(0)};
+    auto res{fir::CallOp::create(builder, loc, funcOp, newArgs).getResult(0)};
     if (res.getType() != argTypes[0]) {
       // fir.call llvm.ppc.altivec.vperm returns !fir.vector<i4:32>
       // convert the result back to the original type
       res = builder.createConvert(loc, vi32Ty, res);
       if (mlirTy != vi32Ty)
-        res =
-            builder.create<mlir::LLVM::BitcastOp>(loc, mlirTy, res).getResult();
+        res = mlir::LLVM::BitcastOp::create(builder, loc, mlirTy, res)
+                  .getResult();
     }
     return builder.createConvert(loc, resultType, res);
   }
@@ -2156,10 +2157,10 @@ PPCIntrinsicLibrary::genVecPerm(mlir::Type resultType,
     auto constInt{constIntOp.getInt()};
     // arg1, arg2, and result type share same VecTypeInfo
     if (vecTyInfo.isFloat()) {
-      mArg0 =
-          builder.create<mlir::LLVM::BitcastOp>(loc, vf64Ty, mArg0).getResult();
-      mArg1 =
-          builder.create<mlir::LLVM::BitcastOp>(loc, vf64Ty, mArg1).getResult();
+      mArg0 = mlir::LLVM::BitcastOp::create(builder, loc, vf64Ty, mArg0)
+                  .getResult();
+      mArg1 = mlir::LLVM::BitcastOp::create(builder, loc, vf64Ty, mArg1)
+                  .getResult();
     }

     llvm::SmallVector<int64_t, 2> nMask; // native vector element order mask
@@ -2188,9 +2189,9 @@ PPCIntrinsicLibrary::genVecPerm(mlir::Type resultType,

     llvm::SmallVector<int64_t, 2> mask =
         (isBEVecElemOrderOnLE()) ? rMask : nMask;
-    auto res{builder.create<mlir::vector::ShuffleOp>(loc, mArg0, mArg1, mask)};
+    auto res{mlir::vector::ShuffleOp::create(builder, loc, mArg0, mArg1, mask)};
     if (res.getType() != mlirTy) {
-      auto cast{builder.create<mlir::LLVM::BitcastOp>(loc, mlirTy, res)};
+      auto cast{mlir::LLVM::BitcastOp::create(builder, loc, mlirTy, res)};
       return builder.createConvert(loc, resultType, cast);
     }
     return builder.createConvert(loc, resultType, res);
@@ -2217,22 +2218,23 @@ PPCIntrinsicLibrary::genVecSel(mlir::Type resultType,

   // construct a constant <16 x i8> vector with value -1 for bitcast
   auto bcVecTy{mlir::VectorType::get(16, i8Ty)};
-  auto vNegOne{builder.create<mlir::vector::BroadcastOp>(loc, bcVecTy, negOne)};
+  auto vNegOne{
+      mlir::vector::BroadcastOp::create(builder, loc, bcVecTy, negOne)};

   // bitcast arguments to bcVecTy
-  auto arg1{builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, vargs[0])};
-  auto arg2{builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, vargs[1])};
-  auto arg3{builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, vargs[2])};
+  auto arg1{mlir::vector::BitCastOp::create(builder, loc, bcVecTy, vargs[0])};
+  auto arg2{mlir::vector::BitCastOp::create(builder, loc, bcVecTy, vargs[1])};
+  auto arg3{mlir::vector::BitCastOp::create(builder, loc, bcVecTy, vargs[2])};

   // vec_sel(arg1, arg2, arg3) =
   //   (arg2 and arg3) or (arg1 and (arg3 xor vector(-1,...)))
-  auto comp{builder.create<mlir::arith::XOrIOp>(loc, arg3, vNegOne)};
-  auto a1AndComp{builder.create<mlir::arith::AndIOp>(loc, arg1, comp)};
-  auto a1OrA2{builder.create<mlir::arith::AndIOp>(loc, arg2, arg3)};
-  auto res{builder.create<mlir::arith::OrIOp>(loc, a1AndComp, a1OrA2)};
+  auto comp{mlir::arith::XOrIOp::create(builder, loc, arg3, vNegOne)};
+  auto a1AndComp{mlir::arith::AndIOp::create(builder, loc, arg1, comp)};
+  auto a1OrA2{mlir::arith::AndIOp::create(builder, loc, arg2, arg3)};
+  auto res{mlir::arith::OrIOp::create(builder, loc, a1AndComp, a1OrA2)};

   auto bcRes{
-      builder.create<mlir::vector::BitCastOp>(loc, vargs[0].getType(), res)};
+      mlir::vector::BitCastOp::create(builder, loc, vargs[0].getType(), res)};

   return builder.createConvert(loc, vecTyInfos[0].toFirVectorType(), bcRes);
 }
@@ -2269,14 +2271,14 @@ PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
     auto vecVal{builder.createIntegerConstant(
         loc, getConvertedElementType(context, vecTyInfoArgs[0].eleTy), width)};
     auto mask{
-        builder.create<mlir::vector::BroadcastOp>(loc, mlirTyArgs[1], vecVal)};
-    auto shft{builder.create<mlir::arith::RemUIOp>(loc, mlirVecArgs[1], mask)};
+        mlir::vector::BroadcastOp::create(builder, loc, mlirTyArgs[1], vecVal)};
+    auto shft{mlir::arith::RemUIOp::create(builder, loc, mlirVecArgs[1], mask)};

     mlir::Value res{nullptr};
     if (vop == VecOp::Sr)
-      res = builder.create<mlir::arith::ShRUIOp>(loc, mlirVecArgs[0], shft);
+      res = mlir::arith::ShRUIOp::create(builder, loc, mlirVecArgs[0], shft);
     else if (vop == VecOp::Sl)
-      res = builder.create<mlir::arith::ShLIOp>(loc, mlirVecArgs[0], shft);
+      res = mlir::arith::ShLIOp::create(builder, loc, mlirVecArgs[0], shft);

     shftRes = builder.createConvert(loc, argTypes[0], res);
   } else if (vop == VecOp::Sll || vop == VecOp::Slo || vop == VecOp::Srl ||
@@ -2286,11 +2288,11 @@ PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
     // Bitcast to vector<4xi32>
     auto bcVecTy{mlir::VectorType::get(4, builder.getIntegerType(32))};
     if (mlirTyArgs[0] != bcVecTy)
-      mlirVecArgs[0] =
-          builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, mlirVecArgs[0]);
+      mlirVecArgs[0] = mlir::vector::BitCastOp::create(builder, loc, bcVecTy,
+                                                       mlirVecArgs[0]);
     if (mlirTyArgs[1] != bcVecTy)
-      mlirVecArgs[1] =
-          builder.create<mlir::vector::BitCastOp>(loc, bcVecTy, mlirVecArgs[1]);
+      mlirVecArgs[1] = mlir::vector::BitCastOp::create(builder, loc, bcVecTy,
+                                                       mlirVecArgs[1]);

     llvm::StringRef funcName;
     switch (vop) {
@@ -2312,13 +2314,13 @@ PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
     auto funcTy{genFuncType<Ty::IntegerVector<4>, Ty::IntegerVector<4>,
                             Ty::IntegerVector<4>>(context, builder)};
     mlir::func::FuncOp funcOp{builder.createFunction(loc, funcName, funcTy)};
-    auto callOp{builder.create<fir::CallOp>(loc, funcOp, mlirVecArgs)};
+    auto callOp{fir::CallOp::create(builder, loc, funcOp, mlirVecArgs)};

     // If the result vector type is different from the original type, need
     // to convert to mlir vector, bitcast and then convert back to fir vector.
     if (callOp.getResult(0).getType() != argTypes[0]) {
       auto res = builder.createConvert(loc, bcVecTy, callOp.getResult(0));
-      res = builder.create<mlir::vector::BitCastOp>(loc, mlirTyArgs[0], res);
+      res = mlir::vector::BitCastOp::create(builder, loc, mlirTyArgs[0], res);
       shftRes = builder.createConvert(loc, argTypes[0], res);
     } else {
       shftRes = callOp.getResult(0);
@@ -2334,10 +2336,10 @@ PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
     auto vi8Ty{mlir::VectorType::get(16, builder.getIntegerType(8))};
     if (mlirTyArgs[0] != vi8Ty) {
       mlirVecArgs[0] =
-          builder.create<mlir::LLVM::BitcastOp>(loc, vi8Ty, mlirVecArgs[0])
+          mlir::LLVM::BitcastOp::create(builder, loc, vi8Ty, mlirVecArgs[0])
               .getResult();
       mlirVecArgs[1] =
-          builder.create<mlir::LLVM::BitcastOp>(loc, vi8Ty, mlirVecArgs[1])
+          mlir::LLVM::BitcastOp::create(builder, loc, vi8Ty, mlirVecArgs[1])
               .getResult();
     }

@@ -2352,19 +2354,19 @@ PPCIntrinsicLibrary::genVecShift(mlir::Type resultType,
     if (triple.isLittleEndian()) {
       for (int i = 16; i < 32; ++i)
         mask.push_back(i - shiftVal);
-      shftRes = builder.create<mlir::vector::ShuffleOp>(loc, mlirVecArgs[1],
-                                                        mlirVecArgs[0], mask);
+      shftRes = mlir::vector::ShuffleOp::create(builder, loc, mlirVecArgs[1],
+                                                mlirVecArgs[0], mask);
     } else {
       for (int i = 0; i < 16; ++i)
         mask.push_back(i + shiftVal);
-      shftRes = builder.create<mlir::vector::ShuffleOp>(loc, mlirVecArgs[0],
-                                                        mlirVecArgs[1], mask);
+      shftRes = mlir::vector::ShuffleOp::create(builder, loc, mlirVecArgs[0],
+                                                mlirVecArgs[1], mask);
     }

     // Bitcast to the original type
     if (shftRes.getType() != mlirTyArgs[0])
       shftRes =
-          builder.create<mlir::LLVM::BitcastOp>(loc, mlirTyArgs[0], shftRes);
+          mlir::LLVM::BitcastOp::create(builder, loc, mlirTyArgs[0], shftRes);

     return builder.createConvert(loc, resultType, shftRes);
   } else
@@ -2389,8 +2391,9 @@ PPCIntrinsicLibrary::genVecSplat(mlir::Type resultType,
     auto vecTyInfo{getVecTypeFromFir(argBases[0])};

     auto extractOp{genVecExtract(resultType, args)};
-    splatOp = builder.create<mlir::vector::SplatOp>(
-        loc, *(extractOp.getUnboxed()), vecTyInfo.toMlirVectorType(context));
+    splatOp =
+        mlir::vector::SplatOp::create(builder, loc, *(extractOp.getUnboxed()),
+                                      vecTyInfo.toMlirVectorType(context));
     retTy = vecTyInfo.toFirVectorType();
     break;
   }
@@ -2398,8 +2401,8 @@ PPCIntrinsicLibrary::genVecSplat(mlir::Type resultType,
     assert(args.size() == 1);
     auto vecTyInfo{getVecTypeFromEle(argBases[0])};

-    splatOp = builder.create<mlir::vector::SplatOp>(
-        loc, argBases[0], vecTyInfo.toMlirVectorType(context));
+    splatOp = mlir::vector::SplatOp::create(
+        builder, loc, argBases[0], vecTyInfo.toMlirVectorType(context));
     retTy = vecTyInfo.toFirVectorType();
     break;
   }
@@ -2409,8 +2412,8 @@ PPCIntrinsicLibrary::genVecSplat(mlir::Type resultType,
     auto intOp{builder.createConvert(loc, eleTy, argBases[0])};

     // the intrinsic always returns vector(integer(4))
-    splatOp = builder.create<mlir::vector::SplatOp>(
-        loc, intOp, mlir::VectorType::get(4, eleTy));
+    splatOp = mlir::vector::SplatOp::create(builder, loc, intOp,
+                                            mlir::VectorType::get(4, eleTy));
     retTy = fir::VectorType::get(4, eleTy);
     break;
   }
@@ -2438,14 +2441,14 @@ PPCIntrinsicLibrary::genVecXlds(mlir::Type resultType,
   auto i64Ty{mlir::IntegerType::get(builder.getContext(), 64)};
   auto i64VecTy{mlir::VectorType::get(2, i64Ty)};
   auto i64RefTy{builder.getRefType(i64Ty)};
-  auto addrConv{builder.create<fir::ConvertOp>(loc, i64RefTy, addr)};
+  auto addrConv{fir::ConvertOp::create(builder, loc, i64RefTy, addr)};

-  auto addrVal{builder.create<fir::LoadOp>(loc, addrConv)};
-  auto splatRes{builder.create<mlir::vector::SplatOp>(loc, addrVal, i64VecTy)};
+  auto addrVal{fir::LoadOp::create(builder, loc, addrConv)};
+  auto splatRes{mlir::vector::SplatOp::create(builder, loc, addrVal, i64VecTy)};

   mlir::Value result{nullptr};
   if (mlirTy != splatRes.getType()) {
-    result = builder.create<mlir::vector::BitCastOp>(loc, mlirTy, splatRes);
+    result = mlir::vector::BitCastOp::create(builder, loc, mlirTy, splatRes);
   } else
     result = splatRes;

@@ -2795,7 +2798,7 @@ void PPCIntrinsicLibrary::genMmaIntr(llvm::ArrayRef<fir::ExtendedValue> args) {
     if (i == 0 && HandlerOp == MMAHandlerOp::FirstArgIsResult) {
       // First argument is passed in as an address. We need to load
       // the content to match the LLVM interface.
-      v = builder.create<fir::LoadOp>(loc, v);
+      v = fir::LoadOp::create(builder, loc, v);
     }
     auto vType{v.getType()};
     mlir::Type targetType{intrFuncType.getInput(j)};
@@ -2806,7 +2809,7 @@ void PPCIntrinsicLibrary::genMmaIntr(llvm::ArrayRef<fir::ExtendedValue> args) {
       auto len{mlir::dyn_cast<fir::VectorType>(vType).getLen()};
       mlir::VectorType mlirType = mlir::VectorType::get(len, eleTy);
       auto v0{builder.createConvert(loc, mlirType, v)};
-      auto v1{builder.create<mlir::vector::BitCastOp>(loc, targetType, v0)};
+      auto v1{mlir::vector::BitCastOp::create(builder, loc, targetType, v0)};
       intrArgs.push_back(v1);
     } else if (mlir::isa<mlir::IntegerType>(targetType) &&
                mlir::isa<mlir::IntegerType>(vType)) {
@@ -2822,7 +2825,7 @@ void PPCIntrinsicLibrary::genMmaIntr(llvm::ArrayRef<fir::ExtendedValue> args) {
       intrArgs.push_back(v);
     }
   }
-  auto callSt{builder.create<fir::CallOp>(loc, funcOp, intrArgs)};
+  auto callSt{fir::CallOp::create(builder, loc, funcOp, intrArgs)};
   if (HandlerOp == MMAHandlerOp::SubToFunc ||
       HandlerOp == MMAHandlerOp::SubToFuncReverseArgOnLE ||
       HandlerOp == MMAHandlerOp::FirstArgIsResult) {
@@ -2831,10 +2834,11 @@ void PPCIntrinsicLibrary::genMmaIntr(llvm::ArrayRef<fir::ExtendedValue> args) {
     mlir::Value destPtr{fir::getBase(args[0])};
     mlir::Type callResultPtrType{builder.getRefType(callResult.getType())};
     if (destPtr.getType() != callResultPtrType) {
-      destPtr = builder.create<fir::ConvertOp>(loc, callResultPtrType, destPtr);
+      destPtr =
+          fir::ConvertOp::create(builder, loc, callResultPtrType, destPtr);
     }

     // Copy the result.
-    builder.create<fir::StoreOp>(loc, callResult, destPtr);
+    fir::StoreOp::create(builder, loc, callResult, destPtr);
   }
 }
@@ -2901,7 +2905,7 @@ void PPCIntrinsicLibrary::genVecStore(llvm::ArrayRef<fir::ExtendedValue> args) {
   if (vop == VecOp::Stxvp) {
     biArgs.push_back(argBases[0]);
     biArgs.push_back(addr);
-    builder.create<fir::CallOp>(loc, funcOp, biArgs);
+    fir::CallOp::create(builder, loc, funcOp, biArgs);
     return;
   }

@@ -2911,7 +2915,7 @@ void PPCIntrinsicLibrary::genVecStore(llvm::ArrayRef<fir::ExtendedValue> args) {

   mlir::Value newArg1{nullptr};
   if (stTy != arg1TyInfo.toMlirVectorType(context))
-    newArg1 = builder.create<mlir::vector::BitCastOp>(loc, stTy, cnv);
+    newArg1 = mlir::vector::BitCastOp::create(builder, loc, stTy, cnv);
   else
     newArg1 = cnv;

@@ -2922,7 +2926,7 @@ void PPCIntrinsicLibrary::genVecStore(llvm::ArrayRef<fir::ExtendedValue> args) {
   biArgs.push_back(newArg1);
   biArgs.push_back(addr);

-  builder.create<fir::CallOp>(loc, funcOp, biArgs);
+  fir::CallOp::create(builder, loc, funcOp, biArgs);
 }

 // VEC_XST, VEC_XST_BE, VEC_STXV, VEC_XSTD2, VEC_XSTW4
@@ -2971,7 +2975,7 @@ void PPCIntrinsicLibrary::genVecXStore(

   mlir::Type srcTy{nullptr};
   if (numElem != arg1TyInfo.len) {
-    cnv = builder.create<mlir::vector::BitCastOp>(loc, mlirVecTy, cnv);
+    cnv = mlir::vector::BitCastOp::create(builder, loc, mlirVecTy, cnv);
     srcTy = firVecTy;
   } else {
     srcTy = arg1TyInfo.toFirVectorType();
@@ -2994,9 +2998,9 @@ void PPCIntrinsicLibrary::genVecXStore(
   default:
     assert(false && "Invalid vector operation for generator");
   }
-  builder.create<fir::StoreOp>(loc, mlir::TypeRange{},
-                               mlir::ValueRange{src, trg},
-                               getAlignmentAttr(builder, 1));
+  fir::StoreOp::create(builder, loc, mlir::TypeRange{},
+                       mlir::ValueRange{src, trg},
+                       getAlignmentAttr(builder, 1));
 }

 } // namespace fir
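Every hunk above applies the same mechanical rewrite of the op-building call: the op class moves out of the builder's template argument and the builder is passed as the first argument of the op's static create method, while the location and the remaining operands stay in the same order. A minimal before/after sketch of the two spellings, taken from the genVecAbs hunk; builder, loc, funcOp, and args stand for whatever values are already in scope at that call site:

    // Old spelling: op class as a template argument on the builder.
    auto callOp{builder.create<fir::CallOp>(loc, funcOp, args)};

    // New spelling: static create on the op class, with the builder first.
    auto callOp{fir::CallOp::create(builder, loc, funcOp, args)};

Both spellings construct the same fir.call operation; only the call syntax differs, which is why the diff touches 250 lines without changing any generated IR.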