Go to the documentation of this file.
54 GV->setAlignment(
Align(1));
63 Value *IRBuilderBase::getCastedInt8PtrValue(
Value *Ptr) {
64 auto *PT = cast<PointerType>(Ptr->
getType());
65 if (PT->isOpaqueOrPointeeTypeMatches(
getInt8Ty()))
73 for (
auto &KV : MetadataToCopy)
74 if (KV.first == LLVMContext::MD_dbg)
75 return {cast<DILocation>(KV.second)};
80 for (
const auto &KV : MetadataToCopy)
81 if (KV.first == LLVMContext::MD_dbg) {
99 assert(isa<ConstantInt>(Scaling) &&
"Expected constant integer");
100 if (cast<ConstantInt>(Scaling)->
isZero())
106 return cast<ConstantInt>(Scaling)->getSExtValue() == 1
113 if (isa<ScalableVectorType>(DstType)) {
114 Type *StepVecType = DstType;
122 {StepVecType}, {},
nullptr,
Name);
123 if (StepVecType != DstType)
128 unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
132 for (
unsigned i = 0;
i < NumEls; ++
i)
143 Ptr = getCastedInt8PtrValue(Ptr);
152 cast<MemSetInst>(CI)->setDestAlignment(
Align->
value());
159 CI->
setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
162 CI->
setMetadata(LLVMContext::MD_noalias, NoAliasTag);
171 Ptr = getCastedInt8PtrValue(Ptr);
176 M, Intrinsic::memset_element_unordered_atomic, Tys);
180 cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
187 CI->
setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
190 CI->
setMetadata(LLVMContext::MD_noalias, NoAliasTag);
199 Dst = getCastedInt8PtrValue(Dst);
200 Src = getCastedInt8PtrValue(Src);
203 Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
209 auto* MCI = cast<MemTransferInst>(CI);
211 MCI->setDestAlignment(*DstAlign);
213 MCI->setSourceAlignment(*SrcAlign);
221 CI->
setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
224 CI->
setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
227 CI->
setMetadata(LLVMContext::MD_noalias, NoAliasTag);
236 Dst = getCastedInt8PtrValue(Dst);
237 Src = getCastedInt8PtrValue(Src);
240 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
247 auto *MCI = cast<MemCpyInlineInst>(CI);
249 MCI->setDestAlignment(*DstAlign);
251 MCI->setSourceAlignment(*SrcAlign);
255 MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
259 MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
262 MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
265 MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
274 assert(DstAlign >= ElementSize &&
275 "Pointer alignment must be at least element size");
276 assert(SrcAlign >= ElementSize &&
277 "Pointer alignment must be at least element size");
278 Dst = getCastedInt8PtrValue(Dst);
279 Src = getCastedInt8PtrValue(Src);
282 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
285 M, Intrinsic::memcpy_element_unordered_atomic, Tys);
290 auto *AMCI = cast<AtomicMemCpyInst>(CI);
291 AMCI->setDestAlignment(DstAlign);
292 AMCI->setSourceAlignment(SrcAlign);
300 CI->
setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
303 CI->
setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
306 CI->
setMetadata(LLVMContext::MD_noalias, NoAliasTag);
313 Value *Size,
bool isVolatile,
316 Dst = getCastedInt8PtrValue(Dst);
317 Src = getCastedInt8PtrValue(Src);
320 Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
326 auto *MMI = cast<MemMoveInst>(CI);
328 MMI->setDestAlignment(*DstAlign);
330 MMI->setSourceAlignment(*SrcAlign);
337 CI->
setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
340 CI->
setMetadata(LLVMContext::MD_noalias, NoAliasTag);
349 assert(DstAlign >= ElementSize &&
350 "Pointer alignment must be at least element size");
351 assert(SrcAlign >= ElementSize &&
352 "Pointer alignment must be at least element size");
353 Dst = getCastedInt8PtrValue(Dst);
354 Src = getCastedInt8PtrValue(Src);
357 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
360 M, Intrinsic::memmove_element_unordered_atomic, Tys);
374 CI->
setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
377 CI->
setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
380 CI->
setMetadata(LLVMContext::MD_noalias, NoAliasTag);
388 Value *Ops[] = {Src};
389 Type *Tys[] = { Src->getType() };
396 Value *Ops[] = {Acc, Src};
404 Value *Ops[] = {Acc, Src};
432 IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
438 IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
452 "lifetime.start only applies to pointers.");
453 Ptr = getCastedInt8PtrValue(Ptr);
458 "lifetime.start requires the size to be an i64");
459 Value *Ops[] = { Size, Ptr };
468 "lifetime.end only applies to pointers.");
469 Ptr = getCastedInt8PtrValue(Ptr);
474 "lifetime.end requires the size to be an i64");
475 Value *Ops[] = { Size, Ptr };
485 "invariant.start only applies to pointers.");
486 Ptr = getCastedInt8PtrValue(Ptr);
491 "invariant.start requires the size to be an i64");
493 Value *Ops[] = {Size, Ptr};
506 "an assumption condition must be of type i1");
517 M, Intrinsic::experimental_noalias_scope_decl, {});
533 auto *PtrTy = cast<PointerType>(Ptr->
getType());
535 assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) &&
"Wrong element type");
536 assert(
Mask &&
"Mask should not be all-ones (null)");
539 Type *OverloadedTypes[] = { Ty, PtrTy };
541 return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
542 OverloadedTypes,
Name);
553 auto *PtrTy = cast<PointerType>(Ptr->
getType());
556 assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) &&
"Wrong element type");
557 assert(
Mask &&
"Mask should not be all-ones (null)");
558 Type *OverloadedTypes[] = { DataTy, PtrTy };
560 return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
588 auto *VecTy = cast<VectorType>(Ty);
590 auto *PtrsTy = cast<VectorType>(Ptrs->
getType());
591 assert(cast<PointerType>(PtrsTy->getElementType())
592 ->isOpaqueOrPointeeTypeMatches(
593 cast<VectorType>(Ty)->getElementType()) &&
594 "Element type mismatch");
595 assert(NumElts == PtrsTy->getElementCount() &&
"Element count mismatch");
604 Type *OverloadedTypes[] = {Ty, PtrsTy};
609 return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
622 auto *PtrsTy = cast<VectorType>(Ptrs->
getType());
623 auto *DataTy = cast<VectorType>(
Data->getType());
627 auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
628 assert(NumElts == DataTy->getElementCount() &&
629 PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
630 "Incompatible pointer and data types");
637 Type *OverloadedTypes[] = {DataTy, PtrsTy};
642 return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
645 template <
typename T0>
646 static std::vector<Value *>
649 std::vector<Value *>
Args;
650 Args.push_back(
B.getInt64(
ID));
651 Args.push_back(
B.getInt32(NumPatchBytes));
652 Args.push_back(ActualCallee);
653 Args.push_back(
B.getInt32(CallArgs.
size()));
654 Args.push_back(
B.getInt32(Flags));
658 Args.push_back(
B.getInt32(0));
659 Args.push_back(
B.getInt32(0));
664 template<
typename T1,
typename T2,
typename T3>
665 static std::vector<OperandBundleDef>
669 std::vector<OperandBundleDef> Rval;
673 Rval.emplace_back(
"deopt", DeoptValues);
675 if (TransitionArgs) {
678 Rval.emplace_back(
"gc-transition", TransitionValues);
683 Rval.emplace_back(
"gc-live", LiveValues);
688 template <
typename T0,
typename T1,
typename T2,
typename T3>
716 return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
718 CallArgs,
None , DeoptArgs, GCArgs,
Name);
726 return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
727 this,
ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
728 DeoptArgs, GCArgs,
Name);
735 return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
737 CallArgs,
None, DeoptArgs, GCArgs,
Name);
740 template <
typename T0,
typename T1,
typename T2,
typename T3>
753 std::vector<Value *>
Args =
758 FnStatepoint, NormalDest, UnwindDest,
Args,
771 return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
772 this,
ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
774 DeoptArgs, GCArgs,
Name);
783 return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
784 this,
ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
785 InvokeArgs, TransitionArgs, DeoptArgs, GCArgs,
Name);
793 return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
794 this,
ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
803 Type *Types[] = {ResultType};
811 int BaseOffset,
int DerivedOffset,
814 Type *Types[] = {ResultType};
827 M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
836 M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
873 Value *ExceptV = getConstrainedFPExcept(Except);
880 {L, R, RoundingV, ExceptV},
nullptr,
Name);
882 setFPAttrs(
C, FPMathTag, UseFMF);
889 assert(Ops.
size() == 2 &&
"Invalid number of operands!");
891 Ops[0], Ops[1],
Name, FPMathTag);
894 assert(Ops.
size() == 1 &&
"Invalid number of operands!");
896 Ops[0],
Name, FPMathTag);
906 Value *ExceptV = getConstrainedFPExcept(Except);
913 bool HasRoundingMD =
false;
917 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
918 case Intrinsic::INTRINSIC: \
919 HasRoundingMD = ROUND_MODE; \
921 #include "llvm/IR/ConstrainedOps.def"
933 if (isa<FPMathOperator>(
C))
934 setFPAttrs(
C, FPMathTag, UseFMF);
938 Value *IRBuilderBase::CreateFCmpHelper(
940 MDNode *FPMathTag,
bool IsSignaling) {
942 auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
943 : Intrinsic::experimental_constrained_fcmp;
947 if (
auto *LC = dyn_cast<Constant>(
LHS))
948 if (
auto *RC = dyn_cast<Constant>(
RHS))
956 Value *PredicateV = getConstrainedFPPredicate(
P);
957 Value *ExceptV = getConstrainedFPExcept(Except);
960 {L, R, PredicateV, ExceptV},
nullptr,
Name);
972 bool HasRoundingMD =
false;
973 switch (
Callee->getIntrinsicID()) {
976 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
977 case Intrinsic::INTRINSIC: \
978 HasRoundingMD = ROUND_MODE; \
980 #include "llvm/IR/ConstrainedOps.def"
983 UseArgs.push_back(getConstrainedFPRounding(
Rounding));
984 UseArgs.push_back(getConstrainedFPExcept(Except));
1000 Sel = addBranchMetadata(Sel, Prof, Unpred);
1002 if (isa<FPMathOperator>(Sel))
1003 setFPAttrs(Sel,
nullptr ,
FMF);
1010 "Pointer subtraction operand types must match!");
1012 ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
1013 "Pointer type must match element type");
1023 "launder.invariant.group only applies to pointers.");
1025 auto *PtrType = Ptr->
getType();
1026 auto *Int8PtrTy =
getInt8PtrTy(PtrType->getPointerAddressSpace());
1027 if (PtrType != Int8PtrTy)
1031 M, Intrinsic::launder_invariant_group, {Int8PtrTy});
1036 "LaunderInvariantGroup should take and return the same type");
1040 if (PtrType != Int8PtrTy)
1047 "strip.invariant.group only applies to pointers.");
1050 auto *PtrType = Ptr->
getType();
1051 auto *Int8PtrTy =
getInt8PtrTy(PtrType->getPointerAddressSpace());
1052 if (PtrType != Int8PtrTy)
1056 M, Intrinsic::strip_invariant_group, {Int8PtrTy});
1061 "StripInvariantGroup should take and return the same type");
1065 if (PtrType != Int8PtrTy)
1071 auto *Ty = cast<VectorType>(V->
getType());
1072 if (isa<ScalableVectorType>(Ty)) {
1075 M, Intrinsic::experimental_vector_reverse, Ty);
1080 int NumElts = Ty->getElementCount().getKnownMinValue();
1081 for (
int i = 0;
i < NumElts; ++
i)
1082 ShuffleMask.push_back(NumElts -
i - 1);
1088 assert(isa<VectorType>(V1->
getType()) &&
"Unexpected type");
1090 "Splice expects matching operand types!");
1092 if (
auto *VTy = dyn_cast<ScalableVectorType>(V1->
getType())) {
1095 M, Intrinsic::experimental_vector_splice, VTy);
1101 unsigned NumElts = cast<FixedVectorType>(V1->
getType())->getNumElements();
1102 assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
1103 "Invalid immediate for vector splice!");
1106 unsigned Idx = (NumElts + Imm) % NumElts;
1108 for (
unsigned I = 0;
I < NumElts; ++
I)
1109 Mask.push_back(Idx +
I);
1122 assert(EC.isNonZero() &&
"Cannot splat to an empty vector!");
1128 Name +
".splatinsert");
1132 Zeros.
resize(EC.getKnownMinValue());
1139 auto *IntTy = cast<IntegerType>(
From->getType());
1140 assert(
DL.getTypeStoreSize(ExtractedTy) + Offset <=
1141 DL.getTypeStoreSize(IntTy) &&
1142 "Element extends past full value");
1145 if (
DL.isBigEndian())
1146 ShAmt = 8 * (
DL.getTypeStoreSize(IntTy) -
1147 DL.getTypeStoreSize(ExtractedTy) - Offset);
1152 "Cannot extract to a larger integer!");
1153 if (ExtractedTy != IntTy) {
1160 Type *ElTy,
Value *
Base,
unsigned Dimension,
unsigned LastIndex,
1164 "Invalid Base ptr type for preserve.array.access.index.");
1165 assert(cast<PointerType>(
BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
1166 "Pointer element type mismatch");
1171 IdxList.push_back(LastIndexV);
1178 M, Intrinsic::preserve_array_access_index, {ResultType,
BaseType});
1182 CreateCall(FnPreserveArrayAccessIndex, {
Base, DimV, LastIndexV});
1186 Fn->
setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1194 "Invalid Base ptr type for preserve.union.access.index.");
1205 Fn->
setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1211 Type *ElTy,
Value *
Base,
unsigned Index,
unsigned FieldIndex,
1215 "Invalid Base ptr type for preserve.struct.access.index.");
1216 assert(cast<PointerType>(
BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
1217 "Pointer element type mismatch");
1226 M, Intrinsic::preserve_struct_access_index, {ResultType,
BaseType});
1230 {
Base, GEPIndex, DIIndex});
1234 Fn->
setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1242 Value *OffsetValue) {
1245 Vals.push_back(OffsetValue);
1253 Value *OffsetValue) {
1255 "trying to create an alignment assumption on a non-pointer?");
1256 assert(Alignment != 0 &&
"Invalid Alignment");
1257 auto *PtrTy = cast<PointerType>(PtrValue->
getType());
1260 return CreateAlignmentAssumptionHelper(
DL, PtrValue, AlignValue, OffsetValue);
1266 Value *OffsetValue) {
1268 "trying to create an alignment assumption on a non-pointer?");
1269 return CreateAlignmentAssumptionHelper(
DL, PtrValue, Alignment, OffsetValue);
1275 void ConstantFolder::anchor() {}
1276 void NoFolder::anchor() {}
CallInst * CreateIntMaxReduce(Value *Src, bool IsSigned=false)
Create a vector integer max reduction intrinsic of the source vector.
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
CallInst * CreateConstrainedFPCall(Function *Callee, ArrayRef< Value * > Args, const Twine &Name="", Optional< RoundingMode > Rounding=None, Optional< fp::ExceptionBehavior > Except=None)
static std::vector< Value * > getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags, ArrayRef< T0 > CallArgs)
This is an optimization pass for GlobalISel generic memory operations.
CallInst * CreateMulReduce(Value *Src)
Create a vector int mul reduction intrinsic of the source vector.
static IntegerType * getInt1Ty(LLVMContext &C)
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
virtual Value * CreateFCmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const =0
A parsed version of the target data layout string in and methods for querying it.
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
const Function * getParent() const
Return the enclosing method, or null if none.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
GlobalVariable * CreateGlobalString(StringRef Str, const Twine &Name="", unsigned AddressSpace=0, Module *M=nullptr)
Make a new global variable with initializer type i8*.
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
CallInst * CreateConstrainedFPCast(Intrinsic::ID ID, Value *V, Type *DestTy, Instruction *FMFSource=nullptr, const Twine &Name="", MDNode *FPMathTag=nullptr, Optional< RoundingMode > Rounding=None, Optional< fp::ExceptionBehavior > Except=None)
CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
CallInst * CreateAssumption(Value *Cond, ArrayRef< OperandBundleDef > OpBundles=llvm::None)
Create an assume intrinsic call that allows the optimizer to assume that the provided condition will be true.
void setConstrainedFPCallAttr(CallBase *I)
Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
Value * CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex, MDNode *DbgInfo)
The instances of the Type class are immutable: once they are created, they are never changed.
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
A container for an operand bundle being viewed as a set of values rather than a set of uses.
const IRBuilderFolder & Folder
CallInst * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateNAryOp(unsigned Opc, ArrayRef< Value * > Ops, const Twine &Name="", MDNode *FPMathTag=nullptr)
Create either a UnaryOperator or BinaryOperator depending on Opc.
static Type * getGEPReturnType(Type *ElTy, Value *Ptr, ArrayRef< Value * > IdxList)
Returns the pointer type returned by the GEP instruction, which may be a vector of pointers.
Convenience struct for specifying and reasoning about fast-math flags.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
static IntegerType * getInt32Ty(LLVMContext &C)
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
CallInst * CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name="")
Create a call to the experimental.gc.pointer.base intrinsic to get the base pointer for the specified...
CallInst * CreateInvariantStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a call to invariant.start intrinsic.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM Basic Block Representation.
CallInst * CreateElementUnorderedAtomicMemMove(Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, uint32_t ElementSize, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert an element unordered-atomic memmove between the specified pointers.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
CallInst * CreateConstrainedFPBinOp(Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource=nullptr, const Twine &Name="", MDNode *FPMathTag=nullptr, Optional< RoundingMode > Rounding=None, Optional< fp::ExceptionBehavior > Except=None)
CallInst * CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, uint64_t Size, Align Alignment, uint32_t ElementSize, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert an element unordered-atomic memset of the region of memory starting at the given pointer to the given value.
This is the shared class of boolean and integer constants.
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
static CallInst * createCallHelper(Function *Callee, ArrayRef< Value * > Ops, IRBuilderBase *Builder, const Twine &Name="", Instruction *FMFSource=nullptr, ArrayRef< OperandBundleDef > OpBundles={})
CallInst * CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, ArrayRef< Value * > CallArgs, Optional< ArrayRef< Value * >> DeoptArgs, ArrayRef< Value * > GCArgs, const Twine &Name="")
Create a call to the experimental.gc.statepoint intrinsic to start a new statepoint sequence.
CallInst * CreateElementUnorderedAtomicMemCpy(Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, uint32_t ElementSize, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert an element unordered-atomic memcpy between the specified pointers.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
virtual Value * FoldSelect(Value *C, Value *True, Value *False) const =0
virtual ~IRBuilderDefaultInserter()
Value * CreateVectorSplice(Value *V1, Value *V2, int64_t Imm, const Twine &Name="")
Return a vector splice intrinsic if using scalable vectors, otherwise return a shufflevector.
CallInst * CreateMemTransferInst(Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, Value *Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
(vector float) vec_cmpeq(*A, *B) C
Value * CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name="")
This instruction compares its operands according to the predicate given to the constructor.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Value * CreateUnOp(Instruction::UnaryOps Opc, Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
bool isVectorTy() const
True if this is an instance of VectorType.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Class to represent integer types.
static Constant * getAllOnesValue(Type *Ty)
Value * CreatePreserveStructAccessIndex(Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex, MDNode *DbgInfo)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
CallInst * CreateXorReduce(Value *Src)
Create a vector int XOR reduction intrinsic of the source vector.
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
LLVMContext & getContext() const
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
FunctionType * getFunctionType()
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does not have a module.
Value * CreateVScale(Constant *Scaling, const Twine &Name="")
Create a call to llvm.vscale, multiplied by Scaling.
CallInst * CreateFPMaxReduce(Value *Src)
Create a vector float max reduction intrinsic of the source vector.
This struct is a compact representation of a valid (non-zero power of two) alignment.
DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
static ElementCount getFixed(ScalarTy MinVal)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This is an important base class in LLVM.
Type * getReturnType() const
Returns the type of the ret val.
IntegerType * getIntPtrTy(const DataLayout &DL, unsigned AddrSpace=0)
Fetch the type representing a pointer to an integer value.
Value * CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex, MDNode *DbgInfo)
static CallInst * getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID, Value *Src)
Module * getParent()
Get the module that this global value is contained inside of...
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Type * getParamType(unsigned i) const
Parameter type accessors.
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
CallInst * CreateFPMinReduce(Value *Src)
Create a vector float min reduction intrinsic of the source vector.
CallInst * CreateIntMinReduce(Value *Src, bool IsSigned=false)
Create a vector integer min reduction intrinsic of the source vector.
CallInst * CreateConstrainedFPCmp(Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R, const Twine &Name="", Optional< fp::ExceptionBehavior > Except=None)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
CallInst * CreateFMulReduce(Value *Acc, Value *Src)
Create a sequential vector fmul reduction intrinsic of the source vector.
CallInst * CreateGCResult(Instruction *Statepoint, Type *ResultType, const Twine &Name="")
Create a call to the experimental.gc.result intrinsic to extract the result from a call wrapped in a ...
This class represents the LLVM 'select' instruction.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with args, mangled using Types.
static InvokeInst * CreateGCStatepointInvokeCommon(IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags, ArrayRef< T0 > InvokeArgs, Optional< ArrayRef< T1 >> TransitionArgs, Optional< ArrayRef< T2 >> DeoptArgs, ArrayRef< T3 > GCArgs, const Twine &Name)
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
A Module instance is used to store all the information related to an LLVM module.
static Constant * getSizeOf(Type *Ty)
getSizeOf constant expr - computes the (alloc) size of a type (in address-units, not bits) in a targe...
~IRBuilderCallbackInserter() override
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
Scope
Defines the scope in which this symbol should be visible: Default -- Visible in the public interface of the linkage unit.
SmallVector< MachineOperand, 4 > Cond
StringRef - Represent a constant reference to a string, i.e.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Type * getType() const
All values are typed, get the type of this value.
CallInst * CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name="")
Create a call to the experimental.gc.get.pointer.offset intrinsic to get the offset of the specified ...
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
LLVMContext & getContext() const
All values hold a context through their type.
Common base class shared among various IRBuilders.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
void SetInstDebugLocation(Instruction *I) const
If this builder has a current debug location, set it on the specified instruction.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
static Constant * get(ArrayRef< Constant * > V)
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
CallInst * CreateAddReduce(Value *Src)
Create a vector int add reduction intrinsic of the source vector.
amdgpu Simplify well known AMD library false FunctionCallee Callee
static Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static CallInst * CreateGCStatepointCallCommon(IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, uint32_t Flags, ArrayRef< T0 > CallArgs, Optional< ArrayRef< T1 >> TransitionArgs, Optional< ArrayRef< T2 >> DeoptArgs, ArrayRef< T3 > GCArgs, const Twine &Name)
static IntegerType * getInt64Ty(LLVMContext &C)
static ConstantInt * getTrue(LLVMContext &Context)
InvokeInst * CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef< Value * > InvokeArgs, Optional< ArrayRef< Value * >> DeoptArgs, ArrayRef< Value * > GCArgs, const Twine &Name="")
Create an invoke to the experimental.gc.statepoint intrinsic to start a new statepoint sequence.
uint64_t value() const
This is a hole in the type system and should not be abused.
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
BasicBlock * GetInsertBlock() const
virtual ~IRBuilderFolder()
Value * CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS, const Twine &Name="")
Return the i64 difference between two pointer values, dividing out the size of the pointed-to objects...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallInst * CreateFAddReduce(Value *Acc, Value *Src)
Create a sequential vector fadd reduction intrinsic of the source vector.
Value * CreateExtractInteger(const DataLayout &DL, Value *From, IntegerType *ExtractedTy, uint64_t Offset, const Twine &Name)
Return a value that has been extracted from a larger integer type.
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
@ PrivateLinkage
Like Internal, but omit from symbol table.
CallInst * CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
size_t size() const
size - Get the array size.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
CallInst * CreateGCRelocate(Instruction *Statepoint, int BaseOffset, int DerivedOffset, Type *ResultType, const Twine &Name="")
Create a call to the experimental.gc.relocate intrinsics to project the relocated value of one pointer against the statepoint.
static std::vector< OperandBundleDef > getStatepointBundles(Optional< ArrayRef< T1 >> TransitionArgs, Optional< ArrayRef< T2 >> DeoptArgs, ArrayRef< T3 > GCArgs)
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcasted to NumElts elements.
Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
This class represents a function call, abstracting a target machine's calling convention.
BlockVerifier::State From
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Type * getCurrentFunctionReturnType() const
Get the return type of the current function that we're emitting into.
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
CallInst * CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, Value *Size, bool IsVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
LLVM Value Representation.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.