Diffstat (limited to 'llvm/lib/Target/RISCV')
-rw-r--r--  llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp   |   2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp |  20
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp          |   5
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.td              |  24
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZb.td            |  38
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td         | 124
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp     |   6
7 files changed, 47 insertions(+), 172 deletions(-)
diff --git a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
index ab93bba..b00589a 100644
--- a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
+++ b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
@@ -68,7 +68,7 @@ const llvm::StringRef RISCVSEWInstrument::DESC_NAME = "RISCV-SEW";
bool RISCVSEWInstrument::isDataValid(llvm::StringRef Data) {
// Return true if Data is one of the valid SEW strings
return StringSwitch<bool>(Data)
- .Cases("E8", "E16", "E32", "E64", true)
+ .Cases({"E8", "E16", "E32", "E64"}, true)
.Default(false);
}
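
For reference, the Cases overload adopted above takes an std::initializer_list of keys mapping to a single value, replacing the old variadic form. A minimal sketch of the idiom (the isVectorSEW helper name is illustrative, not part of this patch):

#include "llvm/ADT/StringSwitch.h"
using namespace llvm;

// Maps several keys to one value via the initializer-list Cases overload,
// mirroring RISCVSEWInstrument::isDataValid above.
static bool isVectorSEW(StringRef Data) {
  return StringSwitch<bool>(Data)
      .Cases({"E8", "E16", "E32", "E64"}, true)
      .Default(false);
}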
diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
index 52dc53e..25b5af8 100644
--- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -495,18 +495,19 @@ RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II) {
VectorType *DataType;
Value *StoreVal = nullptr, *Ptr, *Mask, *EVL = nullptr;
- MaybeAlign MA;
+ Align Alignment;
switch (II->getIntrinsicID()) {
case Intrinsic::masked_gather:
DataType = cast<VectorType>(II->getType());
Ptr = II->getArgOperand(0);
- MA = cast<ConstantInt>(II->getArgOperand(1))->getMaybeAlignValue();
- Mask = II->getArgOperand(2);
+ Alignment = II->getParamAlign(0).valueOrOne();
+ Mask = II->getArgOperand(1);
break;
case Intrinsic::vp_gather:
DataType = cast<VectorType>(II->getType());
Ptr = II->getArgOperand(0);
- MA = II->getParamAlign(0).value_or(
+ // FIXME: Falling back to ABI alignment is incorrect.
+ Alignment = II->getParamAlign(0).value_or(
DL->getABITypeAlign(DataType->getElementType()));
Mask = II->getArgOperand(1);
EVL = II->getArgOperand(2);
@@ -515,14 +516,15 @@ bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II) {
DataType = cast<VectorType>(II->getArgOperand(0)->getType());
StoreVal = II->getArgOperand(0);
Ptr = II->getArgOperand(1);
- MA = cast<ConstantInt>(II->getArgOperand(2))->getMaybeAlignValue();
- Mask = II->getArgOperand(3);
+ Alignment = II->getParamAlign(1).valueOrOne();
+ Mask = II->getArgOperand(2);
break;
case Intrinsic::vp_scatter:
DataType = cast<VectorType>(II->getArgOperand(0)->getType());
StoreVal = II->getArgOperand(0);
Ptr = II->getArgOperand(1);
- MA = II->getParamAlign(1).value_or(
+ // FIXME: Falling back to ABI alignment is incorrect.
+ Alignment = II->getParamAlign(1).value_or(
DL->getABITypeAlign(DataType->getElementType()));
Mask = II->getArgOperand(2);
EVL = II->getArgOperand(3);
@@ -533,7 +535,7 @@ bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II) {
// Make sure the operation will be supported by the backend.
EVT DataTypeVT = TLI->getValueType(*DL, DataType);
- if (!MA || !TLI->isLegalStridedLoadStore(DataTypeVT, *MA))
+ if (!TLI->isLegalStridedLoadStore(DataTypeVT, Alignment))
return false;
// FIXME: Let the backend type legalize by splitting/widening?
@@ -571,7 +573,7 @@ bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II) {
// Merge llvm.masked.gather's passthru
if (II->getIntrinsicID() == Intrinsic::masked_gather)
- Call = Builder.CreateSelect(Mask, Call, II->getArgOperand(3));
+ Call = Builder.CreateSelect(Mask, Call, II->getArgOperand(2));
} else
Call = Builder.CreateIntrinsic(
Intrinsic::experimental_vp_strided_store,
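
The substantive change in this file: masked.gather/masked.scatter no longer pass alignment as an explicit immarg operand, so it is read from the align parameter attribute on the pointer operand instead, and the mask/passthru operand indices shift down by one. A minimal sketch of the new lookup, assuming the LLVM C++ API used in the hunk (the gatherAlign helper is illustrative):

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Reads a masked.gather's alignment from the pointer operand's `align`
// parameter attribute; valueOrOne() falls back to Align(1) when the
// attribute is absent, which is conservatively correct.
static Align gatherAlign(const IntrinsicInst *II) {
  return II->getParamAlign(/*ArgNo=*/0).valueOrOne();
}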
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 169465e..0a53ba9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12649,10 +12649,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
// Reassemble the low and high pieces reversed.
- // FIXME: This is a CONCAT_VECTORS.
- SDValue Res = DAG.getInsertSubvector(DL, DAG.getUNDEF(VecVT), Hi, 0);
- return DAG.getInsertSubvector(DL, Res, Lo,
- LoVT.getVectorMinNumElements());
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Hi, Lo);
}
// Just promote the int type to i16 which will double the LMUL.
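
This resolves the deleted FIXME: inserting Hi at element 0 of an undef vector and Lo at the midpoint is by definition a concatenation, so a single CONCAT_VECTORS node expresses it directly. A sketch of the equivalence under the hunk's own names (the wrapper function is illustrative):

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// insert(insert(undef : VecVT, Hi, 0), Lo, LoVT.getVectorMinNumElements())
// builds the same value as concat_vectors(Hi, Lo); the single node is
// simpler and easier for later DAG combines to recognize.
static SDValue concatReversedHalves(SelectionDAG &DAG, const SDLoc &DL,
                                    EVT VecVT, SDValue Hi, SDValue Lo) {
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, Hi, Lo);
}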
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 66717b9..7c89686 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1511,16 +1511,16 @@ def GIShiftMask32 :
GIComplexOperandMatcher<s64, "selectShiftMask32">,
GIComplexPatternEquiv<shiftMask32>;
-class shiftop<SDPatternOperator operator>
- : PatFrag<(ops node:$val, node:$count),
- (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>;
-class shiftopw<SDPatternOperator operator>
- : PatFrag<(ops node:$val, node:$count),
- (operator node:$val, (i64 (shiftMask32 node:$count)))>;
+class PatGprShiftMaskXLen<SDPatternOperator OpNode, RVInst Inst>
+ : Pat<(OpNode GPR:$rs1, shiftMaskXLen:$rs2),
+ (Inst GPR:$rs1, shiftMaskXLen:$rs2)>;
+class PatGprShiftMask32<SDPatternOperator OpNode, RVInst Inst>
+ : Pat<(OpNode GPR:$rs1, shiftMask32:$rs2),
+ (Inst GPR:$rs1, shiftMask32:$rs2)>;
-def : PatGprGpr<shiftop<shl>, SLL>;
-def : PatGprGpr<shiftop<srl>, SRL>;
-def : PatGprGpr<shiftop<sra>, SRA>;
+def : PatGprShiftMaskXLen<shl, SLL>;
+def : PatGprShiftMaskXLen<srl, SRL>;
+def : PatGprShiftMaskXLen<sra, SRA>;
// This is a special case of the ADD instruction used to facilitate the use of a
// fourth operand to emit a relocation on a symbol relating to this instruction.
@@ -2203,9 +2203,9 @@ def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
def : Pat<(i64 (sra (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
(SRAIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;
-def : PatGprGpr<shiftopw<riscv_sllw>, SLLW>;
-def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>;
-def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>;
+def : PatGprShiftMask32<riscv_sllw, SLLW>;
+def : PatGprShiftMask32<riscv_srlw, SRLW>;
+def : PatGprShiftMask32<riscv_sraw, SRAW>;
// Select W instructions if only the lower 32 bits of the result are used.
def : PatGprGpr<binop_allwusers<add>, ADDW>;
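
The renamed pattern classes encode the same fact as the old shiftop wrappers: RISC-V shifts read only the low log2(XLEN) bits of rs2, so a source-level mask of the shift amount is redundant and the bare instruction can be selected. In C terms, a sketch for RV64 (the & 63 is what shiftMaskXLen matches and folds away):

#include <stdint.h>

// SLL/SRL/SRA on RV64 use only rs2[5:0], so these compile to the bare
// shift with no extra AND (arithmetic right shift assumed for sra).
uint64_t shl_rv64(uint64_t val, uint64_t amt) { return val << (amt & 63); }
uint64_t srl_rv64(uint64_t val, uint64_t amt) { return val >> (amt & 63); }
int64_t  sra_rv64(int64_t  val, uint64_t amt) { return val >> (amt & 63); }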
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 57fbaa0..62b7bcd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -506,8 +506,8 @@ def : Pat<(XLenVT (xor GPR:$rs1, invLogicImm:$rs2)), (XNOR GPR:$rs1, invLogicImm
} // Predicates = [HasStdExtZbbOrZbkb]
let Predicates = [HasStdExtZbbOrZbkb] in {
-def : PatGprGpr<shiftop<rotl>, ROL>;
-def : PatGprGpr<shiftop<rotr>, ROR>;
+def : PatGprShiftMaskXLen<rotl, ROL>;
+def : PatGprShiftMaskXLen<rotr, ROR>;
def : PatGprImm<rotr, RORI, uimmlog2xlen>;
// There's no encoding for roli in the 'B' extension as it can be
@@ -517,29 +517,29 @@ def : Pat<(XLenVT (rotl GPR:$rs1, uimmlog2xlen:$shamt)),
} // Predicates = [HasStdExtZbbOrZbkb]
let Predicates = [HasStdExtZbbOrZbkb, IsRV64] in {
-def : PatGprGpr<shiftopw<riscv_rolw>, ROLW>;
-def : PatGprGpr<shiftopw<riscv_rorw>, RORW>;
+def : PatGprShiftMask32<riscv_rolw, ROLW>;
+def : PatGprShiftMask32<riscv_rorw, RORW>;
def : PatGprImm<riscv_rorw, RORIW, uimm5>;
def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
(RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
} // Predicates = [HasStdExtZbbOrZbkb, IsRV64]
let Predicates = [HasStdExtZbs] in {
-def : Pat<(XLenVT (and (not (shiftop<shl> 1, (XLenVT GPR:$rs2))), GPR:$rs1)),
- (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (and (rotl -2, (XLenVT GPR:$rs2)), GPR:$rs1)),
- (BCLR GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (or (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
- (BSET GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (xor (shiftop<shl> 1, (XLenVT GPR:$rs2)), GPR:$rs1)),
- (BINV GPR:$rs1, GPR:$rs2)>;
-def : Pat<(XLenVT (and (shiftop<srl> GPR:$rs1, (XLenVT GPR:$rs2)), 1)),
- (BEXT GPR:$rs1, GPR:$rs2)>;
-
-def : Pat<(XLenVT (shiftop<shl> 1, (XLenVT GPR:$rs2))),
- (BSET (XLenVT X0), GPR:$rs2)>;
-def : Pat<(XLenVT (not (shiftop<shl> -1, (XLenVT GPR:$rs2)))),
- (ADDI (XLenVT (BSET (XLenVT X0), GPR:$rs2)), -1)>;
+def : Pat<(XLenVT (and (not (shl 1, shiftMaskXLen:$rs2)), GPR:$rs1)),
+ (BCLR GPR:$rs1, shiftMaskXLen:$rs2)>;
+def : Pat<(XLenVT (and (rotl -2, shiftMaskXLen:$rs2), GPR:$rs1)),
+ (BCLR GPR:$rs1, shiftMaskXLen:$rs2)>;
+def : Pat<(XLenVT (or (shl 1, shiftMaskXLen:$rs2), GPR:$rs1)),
+ (BSET GPR:$rs1, shiftMaskXLen:$rs2)>;
+def : Pat<(XLenVT (xor (shl 1, shiftMaskXLen:$rs2), GPR:$rs1)),
+ (BINV GPR:$rs1, shiftMaskXLen:$rs2)>;
+def : Pat<(XLenVT (and (srl GPR:$rs1, shiftMaskXLen:$rs2), 1)),
+ (BEXT GPR:$rs1, shiftMaskXLen:$rs2)>;
+
+def : Pat<(XLenVT (shl 1, shiftMaskXLen:$rs2)),
+ (BSET (XLenVT X0), shiftMaskXLen:$rs2)>;
+def : Pat<(XLenVT (not (shl -1, shiftMaskXLen:$rs2))),
+ (ADDI (XLenVT (BSET (XLenVT X0), shiftMaskXLen:$rs2)), -1)>;
def : Pat<(XLenVT (and GPR:$rs1, BCLRMask:$mask)),
(BCLRI GPR:$rs1, BCLRMask:$mask)>;
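
Rewriting these patterns in terms of shiftMaskXLen leaves the selected Zbs semantics unchanged. In C form, a sketch for RV64 of the single-bit operations the patterns match (the hardware masks the bit index itself):

#include <stdint.h>

uint64_t bclr(uint64_t x, uint64_t n) { return x & ~(1ULL << (n & 63)); } // BCLR: clear bit n
uint64_t bset(uint64_t x, uint64_t n) { return x |  (1ULL << (n & 63)); } // BSET: set bit n
uint64_t binv(uint64_t x, uint64_t n) { return x ^  (1ULL << (n & 63)); } // BINV: invert bit n
uint64_t bext(uint64_t x, uint64_t n) { return (x >> (n & 63)) & 1;     } // BEXT: extract bit n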
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
index 9358486..f7d1a09 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
@@ -438,130 +438,6 @@ let Predicates = [HasStdExtZvfbfmin] in {
FRM_DYN,
fvti.AVL, fvti.Log2SEW, TA_MA)>;
}
-
- defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBF16Vectors>;
- defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
- AllBF16Vectors, uimm5>;
- defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
- eew=16, vtilist=AllBF16Vectors>;
- defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllBF16Vectors, uimm5>;
- defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllBF16Vectors, uimm5>;
-
- foreach fvti = AllBF16Vectors in {
- defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM",
- fvti.Vector,
- fvti.Vector, fvti.Vector, fvti.Mask,
- fvti.Log2SEW, fvti.LMul, fvti.RegClass,
- fvti.RegClass, fvti.RegClass>;
- defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE",
- "V"#fvti.ScalarSuffix#"M",
- fvti.Vector,
- fvti.Vector, fvti.Scalar, fvti.Mask,
- fvti.Log2SEW, fvti.LMul, fvti.RegClass,
- fvti.RegClass, fvti.ScalarRegClass>;
- defvar instr = !cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX);
- def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru),
- (fvti.Vector fvti.RegClass:$rs2),
- (fvti.Scalar (fpimm0)),
- (fvti.Mask VMV0:$vm), VLOpFrag)),
- (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0,
- (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
-
- defvar ivti = GetIntVTypeInfo<fvti>.Vti;
- def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1,
- fvti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
- (fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
- fvti.AVL, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
- (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
- fvti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
- (fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
- (SplatFPOp (fvti.Scalar fpimm0)),
- fvti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- (fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
- (SplatFPOp fvti.ScalarRegClass:$rs1),
- fvti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
- (fvti.Vector (IMPLICIT_DEF)),
- fvti.RegClass:$rs2,
- (fvti.Scalar fvti.ScalarRegClass:$rs1),
- (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
- fvti.RegClass:$rs1,
- fvti.RegClass:$rs2,
- fvti.RegClass:$passthru,
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
- fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
- (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
- fvti.RegClass:$rs2,
- fvti.RegClass:$passthru,
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
- fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
-
-
- def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
- (SplatFPOp (fvti.Scalar fpimm0)),
- fvti.RegClass:$rs2,
- fvti.RegClass:$passthru,
- VLOpFrag)),
- (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
- fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm),
- GPR:$vl, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
- (SplatFPOp fvti.ScalarRegClass:$rs1),
- fvti.RegClass:$rs2,
- fvti.RegClass:$passthru,
- VLOpFrag)),
- (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
- fvti.RegClass:$passthru, fvti.RegClass:$rs2,
- (fvti.Scalar fvti.ScalarRegClass:$rs1),
- (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
-
- def : Pat<(fvti.Vector
- (riscv_vrgather_vv_vl fvti.RegClass:$rs2,
- (ivti.Vector fvti.RegClass:$rs1),
- fvti.RegClass:$passthru,
- (fvti.Mask VMV0:$vm),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVRGATHER_VV_"# fvti.LMul.MX#"_E"# fvti.SEW#"_MASK")
- fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1,
- (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
- def : Pat<(fvti.Vector (riscv_vrgather_vx_vl fvti.RegClass:$rs2, GPR:$rs1,
- fvti.RegClass:$passthru,
- (fvti.Mask VMV0:$vm),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVRGATHER_VX_"# fvti.LMul.MX#"_MASK")
- fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$rs1,
- (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
- def : Pat<(fvti.Vector
- (riscv_vrgather_vx_vl fvti.RegClass:$rs2,
- uimm5:$imm,
- fvti.RegClass:$passthru,
- (fvti.Mask VMV0:$vm),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVRGATHER_VI_"# fvti.LMul.MX#"_MASK")
- fvti.RegClass:$passthru, fvti.RegClass:$rs2, uimm5:$imm,
- (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>;
- }
}
let Predicates = [HasStdExtZvfbfwma] in {
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 5e10631..528bbdf 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -169,9 +169,9 @@ static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy,
}
case Intrinsic::masked_load: {
Ptr = II->getOperand(0);
- Alignment = cast<ConstantInt>(II->getArgOperand(1))->getAlignValue();
+ Alignment = II->getParamAlign(0).valueOrOne();
- if (!isa<UndefValue>(II->getOperand(3)))
+ if (!isa<UndefValue>(II->getOperand(2)))
return false;
assert(Mask && "masked.load needs a mask!");
@@ -183,7 +183,7 @@ static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy,
}
case Intrinsic::masked_store: {
Ptr = II->getOperand(1);
- Alignment = cast<ConstantInt>(II->getArgOperand(2))->getAlignValue();
+ Alignment = II->getParamAlign(1).valueOrOne();
assert(Mask && "masked.store needs a mask!");