  GV->setAlignment(Align(1));
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;

  return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
                                 {StepVecType}, {}, nullptr, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
  for (unsigned i = 0; i < NumEls; ++i)
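// Illustrative usage sketch (not part of IRBuilder.cpp, relying on the headers
// this file already includes): materializing the sequence <0, 1, 2, 3> as a
// <4 x i32> value through the helper above.
static llvm::Value *emitIota4(llvm::IRBuilderBase &B) {
  return B.CreateStepVector(llvm::FixedVectorType::get(B.getInt32Ty(), 4));
}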
  cast<MemSetInst>(CI)->setDestAlignment(*Align);

  CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
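// Illustrative usage sketch (not part of IRBuilder.cpp): a front end that
// wants to zero a 64-byte buffer goes through CreateMemSet rather than
// building the llvm.memset call and its metadata by hand.  `B` and `Buf` are
// assumptions for the example.
static void emitZeroFill(llvm::IRBuilderBase &B, llvm::Value *Buf) {
  // memset(Buf, 0, 64) with a known 16-byte destination alignment; the TBAA,
  // alias.scope and noalias metadata parameters default to nullptr.
  B.CreateMemSet(Buf, B.getInt8(0), /*Size=*/uint64_t(64),
                 llvm::MaybeAlign(16));
}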
                                       bool IsVolatile, MDNode *TBAATag,

  Type *Tys[] = {Dst->getType(), Size->getType()};

  cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  auto *MCI = cast<MemTransferInst>(CI);
  MCI->setDestAlignment(*DstAlign);
  MCI->setSourceAlignment(*SrcAlign);

  CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
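// Illustrative usage sketch (not part of IRBuilder.cpp): CreateMemTransferInst
// is the shared worker behind the public CreateMemCpy/CreateMemMove helpers,
// so a typical caller looks like this (`Dst` and `Src` are assumptions):
static void emitCopy(llvm::IRBuilderBase &B, llvm::Value *Dst,
                     llvm::Value *Src) {
  // memcpy(Dst, Src, 128) with 8-byte alignment known on both sides.
  B.CreateMemCpy(Dst, llvm::MaybeAlign(8), Src, llvm::MaybeAlign(8),
                 /*Size=*/uint64_t(128));
}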
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");

  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  assert(Val && "isConstantOne does not work with nullptr Val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
  return CVal && CVal->isOne();
  else if (ArraySize->getType() != IntPtrTy)

      AllocSize = ArraySize;

      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");

    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);

    F->setReturnDoesNotAlias();

  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, std::nullopt,
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);

  Result->setTailCall();

    Result->setCallingConv(F->getCallingConv());
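// Illustrative usage sketch (not part of IRBuilder.cpp): allocating a single
// object of `ElemTy` with the malloc helper above and freeing it again.  The
// DataLayout is used to pick the pointer-sized integer type and the byte size.
static void emitMallocFree(llvm::IRBuilderBase &B, llvm::Type *ElemTy,
                           const llvm::DataLayout &DL) {
  using namespace llvm;
  Type *IntPtrTy = B.getIntPtrTy(DL);
  Value *AllocSize =
      ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(ElemTy).getFixedValue());
  CallInst *Mem = B.CreateMalloc(IntPtrTy, ElemTy, AllocSize,
                                 /*ArraySize=*/nullptr, /*MallocF=*/nullptr,
                                 "obj");
  B.CreateFree(Mem);
}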
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");

  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
  Value *Ops[] = {Src};
  Type *Tys[] = {Src->getType()};

  Value *Ops[] = {Acc, Src};

  Value *Ops[] = {Acc, Src};
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);

  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);

  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
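// Illustrative usage sketch (not part of IRBuilder.cpp): summing the lanes of
// a <4 x i32> value with the add-reduction helper, which emits
// llvm.vector.reduce.add and yields a scalar i32.  The sequential FP
// reductions additionally take a start value, e.g. CreateFAddReduce(Start, V).
static llvm::Value *emitLaneSum(llvm::IRBuilderBase &B,
                                llvm::Value *Vec4xI32) {
  return B.CreateAddReduce(Vec4xI32);
}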
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
           "lifetime.start requires the size to be an i64");

  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
           "lifetime.end requires the size to be an i64");

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
           "invariant.start requires the size to be an i64");

  Type *ObjectPtr[1] = {Ptr->getType()};
  if (auto *O = dyn_cast<GlobalObject>(Ptr))
    return O->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return A->getAliaseeObject()->getAlign();
  assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
554 "an assumption condition must be of type i1");
565 M, Intrinsic::experimental_noalias_scope_decl, {});
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {Ty, PtrTy};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes,
                               Name);
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
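// Illustrative usage sketch (not part of IRBuilder.cpp): a masked load/store
// pair over <8 x float>.  Lanes whose mask bit is false read the pass-through
// value on the load and are left untouched by the store.  `DstPtr`, `SrcPtr`
// and `Mask8xI1` are assumptions for the example.
static void emitMaskedCopy(llvm::IRBuilderBase &B, llvm::Value *DstPtr,
                           llvm::Value *SrcPtr, llvm::Value *Mask8xI1) {
  using namespace llvm;
  Type *VecTy = FixedVectorType::get(B.getFloatTy(), 8);
  Value *PassThru = PoisonValue::get(VecTy);
  Value *V = B.CreateMaskedLoad(VecTy, SrcPtr, Align(4), Mask8xI1, PassThru);
  B.CreateMaskedStore(V, DstPtr, Align(4), Mask8xI1);
}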
  auto *VecTy = cast<VectorType>(Ty);
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  Type *OverloadedTypes[] = {Ty, PtrsTy};

  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());

  Type *OverloadedTypes[] = {DataTy, PtrsTy};

  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
  assert(Mask && "Mask should not be all-ones (null)");

  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                               OverloadedTypes, Name);
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};

  return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));

  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;

    Rval.emplace_back("deopt", DeoptValues);

  if (TransitionArgs) {

    Rval.emplace_back("gc-transition", TransitionValues);

    Rval.emplace_back("gc-live", LiveValues);
template <typename T0, typename T1, typename T2, typename T3>

      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);

  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);

  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
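// Illustrative usage sketch (not part of IRBuilder.cpp): wrapping a call to
// `Callee` in a statepoint with no transition, deopt or GC arguments.  The ID
// 2882400000 (0xABCDEF00) is the conventional default statepoint ID; both it
// and the zero patch-byte count are assumptions for the example.
static llvm::CallInst *emitStatepoint(llvm::IRBuilderBase &B,
                                      llvm::FunctionCallee Callee,
                                      llvm::ArrayRef<llvm::Value *> Args) {
  return B.CreateGCStatepointCall(/*ID=*/2882400000, /*NumPatchBytes=*/0,
                                  Callee, Args, /*DeoptArgs=*/std::nullopt,
                                  /*GCArgs=*/{});
}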
template <typename T0, typename T1, typename T2, typename T3>

  std::vector<Value *> Args =

      FnStatepoint, NormalDest, UnwindDest, Args,
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      std::nullopt, DeoptArgs, GCArgs, Name);

  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);

  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
  Type *Types[] = {ResultType};

  Value *Args[] = {Statepoint};

                                         int BaseOffset, int DerivedOffset,
  Type *Types[] = {ResultType};

      M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});

      M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return createCallHelper(Fn, {V}, Name, FMFSource);

  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);

  return createCallHelper(Fn, Args, Name, FMFSource);

      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
         "Wrong types for intrinsic!");

  return createCallHelper(Fn, Args, Name, FMFSource);
                                 std::optional<RoundingMode> Rounding,
                                 std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

                     {L, R, RoundingV, ExceptV}, nullptr, Name);

  setFPAttrs(C, FPMathTag, UseFMF);
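// Illustrative usage sketch (not part of IRBuilder.cpp): a strict-FP add with
// dynamic rounding and strict exception semantics, roughly what a front end
// emits inside a #pragma STDC FENV_ACCESS ON region (the builder would also
// have its constrained-FP mode enabled).
static llvm::CallInst *emitStrictFAdd(llvm::IRBuilderBase &B, llvm::Value *L,
                                      llvm::Value *R) {
  return B.CreateConstrainedFPBinOp(
      llvm::Intrinsic::experimental_constrained_fadd, L, R,
      /*FMFSource=*/nullptr, "strict.add", /*FPMathTag=*/nullptr,
      llvm::RoundingMode::Dynamic, llvm::fp::ebStrict);
}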
                                 std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  setFPAttrs(C, FPMathTag, UseFMF);
    assert(Ops.size() == 2 && "Invalid number of operands!");
                       Ops[0], Ops[1], Name, FPMathTag);

    assert(Ops.size() == 1 && "Invalid number of operands!");
                    Ops[0], Name, FPMathTag);
                                std::optional<RoundingMode> Rounding,
                                std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  bool HasRoundingMD = false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"

  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
Value *IRBuilderBase::CreateFCmpHelper(
    MDNode *FPMathTag, bool IsSignaling) {

    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

                     {L, R, PredicateV, ExceptV}, nullptr, Name);
                                 std::optional<RoundingMode> Rounding,
                                 std::optional<fp::ExceptionBehavior> Except) {

  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"

    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));
    Sel = addBranchMetadata(Sel, Prof, Unpred);

  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr, FMF);
1127 "Pointer subtraction operand types must match!");
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  auto *PtrType = Ptr->getType();

      M, Intrinsic::launder_invariant_group, {PtrType});

         "LaunderInvariantGroup should take and return the same type");
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  auto *PtrType = Ptr->getType();

      M, Intrinsic::strip_invariant_group, {PtrType});

         "StripInvariantGroup should take and return the same type");
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {

        M, Intrinsic::experimental_vector_reverse, Ty);

  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {

        M, Intrinsic::experimental_vector_splice, VTy);

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  unsigned Idx = (NumElts + Imm) % NumElts;

  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);
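// Worked example (an illustration, not part of IRBuilder.cpp): for a
// fixed-length splice with NumElts == 4 and Imm == 1, Idx is (4 + 1) % 4 == 1,
// so the shuffle mask built above is {1, 2, 3, 4}.  Over the concatenation of
// V1 (lanes 0-3) and V2 (lanes 4-7) that selects the last three lanes of V1
// followed by the first lane of V2:
//   Value *S = Builder.CreateVectorSplice(V1, V2, /*Imm=*/1);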
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  Zeros.resize(EC.getKnownMinValue());
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
         "Invalid Base ptr type for preserve.array.access.index.");

      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});

  Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1264 "Invalid Base ptr type for preserve.union.access.index.");
1275 Fn->
setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1285 "Invalid Base ptr type for preserve.struct.access.index.");
1294 M, Intrinsic::preserve_struct_access_index, {ResultType,
BaseType});
1298 {
Base, GEPIndex, DIIndex});
1302 Fn->
setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
  return CreateCall(FnIsFPClass, {FPNum, TestV});
                                                 Value *OffsetValue) {

                                                      Value *OffsetValue) {
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());

  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);

                                                      Value *OffsetValue) {
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
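// Illustrative usage sketch (not part of IRBuilder.cpp): telling the optimizer
// that `Ptr` is 32-byte aligned.  This materializes an llvm.assume call
// carrying an "align" operand bundle on the pointer.
static void emitAlignHint(llvm::IRBuilderBase &B, const llvm::DataLayout &DL,
                          llvm::Value *Ptr) {
  B.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/32);
}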
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}