Source listing fragments from LLVM's IntrinsicInst implementation, followed by the symbols they reference.
// DbgVariableIntrinsic::location_ops() (fragment)
assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
// A single ValueAsMetadata operand yields a one-element range.
if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
  // ...
}
// A DIArgList operand yields a range over all of its arguments.
if (auto *AL = dyn_cast<DIArgList>(MD))
  // ...

// DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) (fragment)
assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
if (auto *AL = dyn_cast<DIArgList>(MD))
  return AL->getArgs()[OpIdx]->getValue();
// ...
assert(isa<ValueAsMetadata>(MD) &&
       "Attempted to get location operand from DbgVariableIntrinsic with none.");
auto *V = cast<ValueAsMetadata>(MD);
assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                     "single location operand.");

// static getAsMetadata(Value *V) helper (fragment)
return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
                                     cast<MetadataAsValue>(V)->getMetadata())
                               : ValueAsMetadata::get(V);
// DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
//                                                 Value *NewValue) (fragment)
assert(NewValue && "Values must be non-null");
auto Locations = location_ops();
auto OldIt = find(Locations, OldValue);
assert(OldIt != Locations.end() && "OldValue must be a current location");
// Single-location form: wrap the new value and overwrite operand 0 directly.
Value *NewOperand = isa<MetadataAsValue>(NewValue) ? NewValue : /* ... */;
return setArgOperand(0, NewOperand);
// DIArgList form: rebuild the argument list with the replacement in place.
for (auto *VMD : Locations)
  MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));

// replaceVariableLocationOp(unsigned OpIdx, Value *NewValue) overload (fragment)
Value *NewOperand = isa<MetadataAsValue>(NewValue) ? NewValue : /* ... */;
return setArgOperand(0, NewOperand);
// ...
MDs.push_back(Idx == OpIdx ? NewOperand : /* ... */);

// DbgVariableIntrinsic::addVariableLocationOps (fragment); assert message:
//   "NewExpr for debug variable intrinsic does not reference every "
//   "location operand."
for (auto *VMD : NewValues)
  // ...

// DbgVariableIntrinsic::getFragmentSizeInBits (fragment)
return Fragment->SizeInBits;
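A minimal usage sketch of the location-operand API above; retargetDebugValue is a made-up helper, and the debug intrinsic and the two values are assumed to exist:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Swap one SSA value tracked by a debug intrinsic for another and report how
// many locations the intrinsic now refers to. The is_contained() guard matches
// the "OldValue must be a current location" assertion in the fragment above.
unsigned retargetDebugValue(DbgVariableIntrinsic &DVI, Value *OldV,
                            Value *NewV) {
  // location_ops() walks every referenced value, whether it is stored directly
  // as ValueAsMetadata or wrapped inside a DIArgList.
  if (is_contained(DVI.location_ops(), OldV))
    DVI.replaceVariableLocationOp(OldV, NewV);
  return DVI.getNumVariableLocationOps();
}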
// Intrinsic::lookupLLVMIntrinsicByName (fragment): successive binary searches
// over the dot-separated components of the intrinsic name.
const char *const *Low = NameTable.begin();
const char *const *High = NameTable.end();
const char *const *LastLow = Low;
// ...
size_t CmpStart = CmpEnd;
CmpEnd = Name.find('.', CmpStart + 1);
// ...
// Compare only the current name component.
auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
  return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
};
// ...
if (LastLow == NameTable.end())
  return -1;
// An overloaded name still matches if the table entry is a dotted prefix of it.
if (Name == NameFound ||
    (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
  return LastLow - NameTable.begin();
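A small, hedged sketch of calling the lookup above. The three-entry table and lookupDemo are made up for illustration; the table must be sorted and hold full "llvm."-prefixed names, and the declaration is assumed to live in llvm/IR/Intrinsics.h:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Intrinsics.h"

// The dotted-prefix check at the end of the search lets an overloaded name
// such as "llvm.memcpy.p0i8.p0i8.i64" resolve to the "llvm.memcpy" entry.
int lookupDemo() {
  static const char *const Names[] = {"llvm.memcpy", "llvm.memmove",
                                      "llvm.memset"};
  // Expected to return 0, the index of "llvm.memcpy".
  return llvm::Intrinsic::lookupLLVMIntrinsicByName(
      Names, "llvm.memcpy.p0i8.p0i8.i64");
}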
// ConstrainedFPIntrinsic::getRoundingMode (fragment): the rounding mode is an
// MDString in the second-to-last argument.
auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
// ...
MD = MAV->getMetadata();
if (!MD || !isa<MDString>(MD))
  // ...

// ConstrainedFPIntrinsic::getExceptionBehavior (fragment): the exception
// behavior is an MDString in the last argument.
auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
// ...
MD = MAV->getMetadata();
if (!MD || !isa<MDString>(MD))
  // ...

// Reading a comparison predicate encoded as an MDString operand (fragment).
Metadata *MD = cast<MetadataAsValue>(Op)->getMetadata();
if (!MD || !isa<MDString>(MD))
  // ...
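A hedged usage sketch of the two accessors above; usesNonDefaultFPEnv is a made-up helper that mirrors what the file's own isDefaultFPEnvironment checks (round-to-nearest plus ignored exceptions is the default):

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// True if a constrained FP intrinsic requests anything other than the default
// round-to-nearest / ignore-exceptions environment.
bool usesNonDefaultFPEnv(const IntrinsicInst &II) {
  const auto *CFP = dyn_cast<ConstrainedFPIntrinsic>(&II);
  if (!CFP)
    return false;
  auto RM = CFP->getRoundingMode();       // Optional<RoundingMode>
  auto EB = CFP->getExceptionBehavior();  // Optional<fp::ExceptionBehavior>
  return (RM && *RM != RoundingMode::NearestTiesToEven) ||
         (EB && *EB != fp::ebIgnore);
}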
// ConstrainedFPIntrinsic arity/property queries (fragments): each INSTRUCTION
// entry in ConstrainedOps.def expands into one case label.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    /* ... */
#include "llvm/IR/ConstrainedOps.def"

#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    /* ... */
#include "llvm/IR/ConstrainedOps.def"

// ConstrainedFPIntrinsic::classof (fragment)
switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
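The switches above rely on X-macros: ConstrainedOps.def lists every constrained operation once, and each switch redefines INSTRUCTION to stamp out the case labels it needs. A self-contained stand-in to illustrate the pattern (DEMO_FP_OPS and DemoID replace the real .def file and intrinsic IDs):

// DEMO_FP_OPS stands in for llvm/IR/ConstrainedOps.def; the real file is
// #included with INSTRUCTION defined, which has the same effect.
#define DEMO_FP_OPS(INSTRUCTION)                                               \
  INSTRUCTION(FAdd, 2, 1, experimental_constrained_fadd)                       \
  INSTRUCTION(FSub, 2, 1, experimental_constrained_fsub)

enum class DemoID {
  experimental_constrained_fadd,
  experimental_constrained_fsub,
  not_constrained
};

// Expands to "case DemoID::experimental_constrained_fadd: case ...:" so every
// listed operation shares the single "return true" below.
static bool isConstrainedDemo(DemoID ID) {
  switch (ID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) case DemoID::INTRINSIC:
    DEMO_FP_OPS(INSTRUCTION)
#undef INSTRUCTION
    return true;
  default:
    return false;
  }
}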
// VPIntrinsic::getStaticVectorLength (fragment): take the element count from
// the mask type, or from the return type when there is no mask operand.
const auto *VT = cast<VectorType>(T);
auto ElemCount = VT->getElementCount();
// ...
// assert message: "Unexpected VP intrinsic without mask operand"
return GetVectorLengthOfType(getType());
// ...
return GetVectorLengthOfType(VPMask->getType());
// VPIntrinsic::getMaskParamPos (fragment)
switch (IntrinsicID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    /* ... */
#include "llvm/IR/VPIntrinsics.def"

// VPIntrinsic::getVectorLengthParamPos (fragment)
switch (IntrinsicID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    /* ... */
#include "llvm/IR/VPIntrinsics.def"

// VPIntrinsic::getMemoryPointerParamPos (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, ...) return POINTERPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// VPIntrinsic::getMemoryDataParam (fragment)
if (!DataParamOpt.hasValue())
  // ...

// VPIntrinsic::getMemoryDataParamPos (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// Another VPIntrinsics.def-driven switch over every registered VP intrinsic
// (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    /* ... */
#include "llvm/IR/VPIntrinsics.def"

// VPIntrinsic::getFunctionalOpcodeForVP (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// VPIntrinsic::getForOpcode (fragment): the reverse mapping, from an IR opcode
// back to its llvm.vp.* intrinsic.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) break;
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
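A hedged sketch of these property queries from the consumer side; isMaskedFunctionalVPOp is a made-up helper and assumes the getters shown in this listing are available on VPIntrinsic:

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Report whether a call is a VP intrinsic that wraps a plain IR opcode
// (e.g. llvm.vp.add wrapping Instruction::Add) and carries a mask operand.
bool isMaskedFunctionalVPOp(const IntrinsicInst &II) {
  Intrinsic::ID ID = II.getIntrinsicID();
  if (!VPIntrinsic::isVPIntrinsic(ID))
    return false;
  const auto &VPI = cast<VPIntrinsic>(II);
  bool HasMask = VPI.getMaskParam() != nullptr;  // null only for vp.merge/select
  bool HasOpcode = VPIntrinsic::getFunctionalOpcodeForVP(ID).hasValue();
  return HasMask && HasOpcode;
}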
// VPIntrinsic::canIgnoreVectorLengthParam (fragment): the explicit vector
// length can be ignored when it provably covers every lane.
using namespace PatternMatch;
// ...
if (EC.isScalable()) {
  // Scalable vectors: match the EVL against a "C * vscale" pattern.
  const auto &DL = ParMod->getDataLayout();
  // ...
  return VScaleFactor >= EC.getKnownMinValue();
}
// Fixed-width vectors: compare a constant EVL against the element count.
const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
// ...
uint64_t VLNum = VLConst->getZExtValue();
if (VLNum >= EC.getKnownMinValue())
  // ...
// VPIntrinsic::getDeclarationForParams (fragment): choose the overload types
// for each VP intrinsic and declare it in the module.
Type *OverloadTy = Params[0]->getType();
// ...
case Intrinsic::vp_trunc:
case Intrinsic::vp_sext:
case Intrinsic::vp_zext:
case Intrinsic::vp_fptoui:
case Intrinsic::vp_fptosi:
case Intrinsic::vp_uitofp:
case Intrinsic::vp_sitofp:
case Intrinsic::vp_fptrunc:
case Intrinsic::vp_fpext:
case Intrinsic::vp_ptrtoint:
case Intrinsic::vp_inttoptr:
  // ...
case Intrinsic::vp_merge:
case Intrinsic::vp_select:
  // ...
case Intrinsic::vp_load:
  // ...
case Intrinsic::experimental_vp_strided_load:
  // ...
case Intrinsic::vp_gather:
  // ...
case Intrinsic::vp_store:
  VPFunc = Intrinsic::getDeclaration(
      M, VPID, {Params[0]->getType(), Params[1]->getType()});
  break;
case Intrinsic::experimental_vp_strided_store:
  VPFunc = Intrinsic::getDeclaration(
      M, VPID,
      {Params[0]->getType(), Params[1]->getType(), Params[2]->getType()});
  break;
case Intrinsic::vp_scatter:
  VPFunc = Intrinsic::getDeclaration(
      M, VPID, {Params[0]->getType(), Params[1]->getType()});
  break;
// ...
assert(VPFunc && "Could not declare VP intrinsic");
// VPReductionIntrinsic::isVPReduction (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// VPCastIntrinsic::isVPCast (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CASTOP return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// VPCmpIntrinsic::isVPCmp (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CMP(CCPOS, ...) return true;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// Reading a comparison predicate encoded as an MDString operand (fragment).
Metadata *MD = cast<MetadataAsValue>(Op)->getMetadata();
if (!MD || !isa<MDString>(MD))
  // ...

// VPCmpIntrinsic::getPredicate (fragment): look up the position of the
// condition-code operand from the VP_PROPERTY_CMP table.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_CMP(CCPOS, ISFP)                                           \
  /* ... */
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
// ...
assert(CCArgIdx.hasValue() && "Unexpected vector-predicated comparison");

// VPReductionIntrinsic::getVectorParamPos (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return VECTORPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"

// VPReductionIntrinsic::getStartParamPos (fragment)
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return STARTPOS;
#define END_REGISTER_VP_INTRINSIC(VPID) break;
#include "llvm/IR/VPIntrinsics.def"
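A small sketch using the two reduction positions above; splitReductionOperands is a made-up helper and assumes R really is a VP reduction:

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// A VP reduction such as llvm.vp.reduce.fadd takes a scalar start value plus
// the vector being reduced; the operand positions come from the tables above.
void splitReductionOperands(const VPReductionIntrinsic &R, Value *&Start,
                            Value *&Vec) {
  Start = R.getArgOperand(R.getStartParamPos());
  Vec = R.getArgOperand(R.getVectorParamPos());
}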
// BinaryOpIntrinsic::getBinaryOp (fragment): map each overflow/saturating
// intrinsic to its underlying IR opcode.
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_sat:
case Intrinsic::sadd_sat:
  return Instruction::Add;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_sat:
case Intrinsic::ssub_sat:
  return Instruction::Sub;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
  return Instruction::Mul;

// BinaryOpIntrinsic::isSigned (fragment): the signed variants.
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat:
  return true;
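A hedged sketch of consuming the mapping above; rebuildAsPlainBinOp is a made-up name and assumes the caller has already proven that the saturation/overflow semantics can be dropped:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Re-emit a saturating or overflow intrinsic as its plain binary opcode.
Value *rebuildAsPlainBinOp(IRBuilder<> &B, const BinaryOpIntrinsic &BO) {
  return B.CreateBinOp(BO.getBinaryOp(), BO.getLHS(), BO.getRHS());
}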
// getStatepoint (fragment): a relocate on the exceptional path of an invoke
// reaches its statepoint through the landing pad's unique predecessor.
if (!isa<LandingPadInst>(Token))
  return cast<GCStatepointInst>(Token);
// ...
const BasicBlock *InvokeBB =
    cast<Instruction>(Token)->getParent()->getUniquePredecessor();
assert(InvokeBB && "safepoints should have unique landingpads");
// assert message: "safepoint block should be well formed"

// GCRelocateInst::getDerivedPtr (fragment): read the pointer from the
// "gc-live" operand bundle if present, otherwise from the statepoint's
// argument list.
// when the statepoint carries a "gc-live" operand bundle:
  return *(Opt->Inputs.begin() + getDerivedPtrIndex());
// otherwise:
return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
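A brief sketch reading the pointers off a gc.relocate, as the fragments above do; collectRelocatedPointers is a made-up name:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Statepoint.h"
using namespace llvm;

// Walk from a gc.relocate back to its statepoint and pull out the pointers it
// relocates; the base/derived indices resolve against the statepoint or its
// "gc-live" operand bundle.
const GCStatepointInst *collectRelocatedPointers(const GCRelocateInst &GR,
                                                 SmallVectorImpl<Value *> &Out) {
  Out.push_back(GR.getBasePtr());
  Out.push_back(GR.getDerivedPtr());
  return GR.getStatepoint();
}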
StringSwitch & Case(StringLiteral S, T Value)
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
DIExpression * getExpression() const
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static Optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
This is an optimization pass for GlobalISel generic memory operations.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it (a usage sketch follows at the end of this list).
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_NODISCARD R Default(T Value)
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static constexpr size_t npos
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
static bool classof(const IntrinsicInst *I)
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
void setVectorLengthParam(Value *)
CmpInst::Predicate getPredicate() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
bool canIgnoreVectorLengthParam() const
bool isDefaultFPEnvironment() const
@ ICMP_SGT
signed greater than
static Function * getDeclarationForParams(Module *M, Intrinsic::ID, Type *ReturnType, ArrayRef< Value * > Params)
Declares a llvm.vp.
The instances of the Type class are immutable: once they are created, they are never changed.
int lookupLLVMIntrinsicByName(ArrayRef< const char * > NameTable, StringRef Name)
Looks up Name in NameTable via binary search.
Value * getMemoryDataParam() const
ConstantInt * getIndex() const
DILocalVariable * getVariable() const
ConstantInt * getNumCounters() const
static Optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)
static bool classof(const IntrinsicInst *I)
@ ICMP_SLE
signed less or equal
VScaleVal_match m_VScale(const DataLayout &DL)
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Value * getVariableLocationOp(unsigned OpIdx) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
void setMaskParam(Value *)
LLVM Basic Block Representation.
constexpr bool hasValue() const
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Represents a gc.statepoint intrinsic call.
static bool isVPReduction(Intrinsic::ID ID)
This is the shared class of boolean and integer constants.
void addVariableLocationOps(ArrayRef< Value * > NewValues, DIExpression *NewExpr)
Adding a new location operand will always result in this intrinsic using an ArgList,...
bool match(Val *V, const Pattern &P)
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static Intrinsic::ID getForOpcode(unsigned OC)
The llvm.vp.* intrinsics for this instruction Opcode.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
unsigned getNumVariableLocationOps() const
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
static bool isVPCmp(Intrinsic::ID ID)
static FCmpInst::Predicate getFPPredicateFromMD(const Value *Op)
static Optional< unsigned > getMemoryPointerParamPos(Intrinsic::ID)
Value * getVectorLengthParam() const
ElementCount getStaticVectorLength() const
bool isSigned() const
Whether the intrinsic is signed or unsigned.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getPointerAlignment() const
Value * getDerivedPtr() const
This is an important class for using LLVM in a threaded context.
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
static bool isVPIntrinsic(Intrinsic::ID)
constexpr const T & getValue() const &
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
Optional< fp::ExceptionBehavior > convertStrToExceptionBehavior(StringRef)
Returns a valid ExceptionBehavior enumerator when given a string valid as input in constrained intrin...
A Module instance is used to store all the information related to an LLVM module.
static ICmpInst::Predicate getIntPredicateFromMD(const Value *Op)
static Optional< unsigned > getMemoryDataParamPos(Intrinsic::ID)
@ ICMP_SLT
signed less than
FCmpInst::Predicate getPredicate() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Optional< fp::ExceptionBehavior > getExceptionBehavior() const
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
StringRef - Represent a constant reference to a string, i.e.
@ ICMP_ULT
unsigned less than
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Type * getType() const
All values are typed, get the type of this value.
Optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
Metadata * getRawLocation() const
LLVMContext & getContext() const
All values hold a context through their type.
Optional< uint64_t > getFragmentSizeInBits() const
Get the size (in bits) of the variable, or fragment of the variable that is described.
unsigned getStartParamPos() const
void setArgOperand(unsigned i, Value *v)
static ValueAsMetadata * getAsMetadata(Value *V)
constexpr LLVM_NODISCARD size_t size() const
size - Get the string size.
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Value * getBasePtr() const
static IntegerType * getInt64Ty(LLVMContext &C)
static bool classof(const IntrinsicInst *I)
Value * getMemoryPointerParam() const
unsigned arg_size() const
unsigned getVectorParamPos() const
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
@ ebIgnore
This corresponds to "fpexcept.ignore".
@ ICMP_SGE
signed greater or equal
static Optional< unsigned > getMaskParamPos(Intrinsic::ID IntrinsicID)
A wrapper class for inspecting calls to intrinsic functions.
@ ICMP_UGT
unsigned greater than
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Value * getArgOperand(unsigned i) const
static bool isVPCast(Intrinsic::ID ID)
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
size_t size() const
size - Get the array size.
A range adaptor for a pair of iterators.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Optional< RoundingMode > getRoundingMode() const
A switch()-like statement whose cases are string literals.
Value * getMaskParam() const
@ NearestTiesToEven
roundTiesToEven.
LLVM Value Representation.
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
const GCStatepointInst * getStatepoint() const
The statepoint with which this gc.relocate is associated.
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Rounding
Possible values of current rounding mode, which is specified in bits 23:22 of FPCR.
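To close the loop on the getDeclaration entry above, a minimal hedged sketch of declaring and calling an overloaded intrinsic (llvm.umax); emitUMax and the surrounding values are assumptions, not part of the listing:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// llvm.umax is overloaded on its operand type, so the declaration is looked up
// (or created) with that one type and then called like any other function.
Value *emitUMax(Module &M, IRBuilder<> &B, Value *A, Value *C) {
  Function *UMax =
      Intrinsic::getDeclaration(&M, Intrinsic::umax, {A->getType()});
  return B.CreateCall(UMax, {A, C});
}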