Go to the documentation of this file.
// DbgVariableIntrinsic::location_ops() (excerpt)
assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
// A single location is held as ValueAsMetadata; multiple locations are held
// as a DIArgList.
if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
  // ...
}
if (auto *AL = dyn_cast<DIArgList>(MD))
  // ...
// DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) (excerpt)
assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
if (auto *AL = dyn_cast<DIArgList>(MD))
  return AL->getArgs()[OpIdx]->getValue();
assert(isa<ValueAsMetadata>(MD) &&
       "Attempted to get location operand from DbgVariableIntrinsic with none.");
auto *V = cast<ValueAsMetadata>(MD);
assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                     "single location operand.");
// getAsMetadata(Value *V): unwrap a MetadataAsValue, or wrap a plain Value as
// ValueAsMetadata.
return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
                                     cast<MetadataAsValue>(V)->getMetadata())
                               : ValueAsMetadata::get(V);
// DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
//                                                 Value *NewValue) (excerpt)
assert(NewValue && "Values must be non-null");
auto OldIt = find(Locations, OldValue);
assert(OldIt != Locations.end() && "OldValue must be a current location");
// Without a DIArgList the single location operand is rewritten directly.
Value *NewOperand = isa<MetadataAsValue>(NewValue)
                        ? NewValue
                        : MetadataAsValue::get(getContext(),
                                               ValueAsMetadata::get(NewValue));
return setArgOperand(0, NewOperand);
// With a DIArgList the argument list is rebuilt, substituting the new value.
for (auto *VMD : Locations)
  MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));

// The index-based overload, replaceVariableLocationOp(unsigned OpIdx,
// Value *NewValue), follows the same two paths:
Value *NewOperand = isa<MetadataAsValue>(NewValue)
                        ? NewValue
                        : MetadataAsValue::get(getContext(),
                                               ValueAsMetadata::get(NewValue));
return setArgOperand(0, NewOperand);
MDs.push_back(Idx == OpIdx ? NewOperand
                           : getAsMetadata(getVariableLocationOp(Idx)));
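The replace-by-value overload above is what a pass typically reaches for after substituting one SSA value for another. A minimal usage sketch, assuming an existing pass context; findDbgUsers comes from llvm/IR/DebugInfo.h, and the helper name retargetDebugUsers is hypothetical:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Keep dbg.value / dbg.declare users pointing at the replacement value after
// a pass rewrites Old to New.
static void retargetDebugUsers(Value *Old, Value *New) {
  SmallVector<DbgVariableIntrinsic *, 4> DbgUsers;
  findDbgUsers(DbgUsers, Old);
  for (DbgVariableIntrinsic *DVI : DbgUsers)
    DVI->replaceVariableLocationOp(Old, New);
}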
// DbgVariableIntrinsic::getFragmentSizeInBits(): prefer the fragment's size
// over the whole variable's size.
return Fragment->SizeInBits;
// Intrinsic::lookupLLVMIntrinsicByName: do successive binary searches over
// the dot-separated components of Name, narrowing [Low, High) until the range
// of possible matches is empty.
size_t CmpEnd = 4; // Skip the "llvm" component.
const char *const *Low = NameTable.begin();
const char *const *High = NameTable.end();
const char *const *LastLow = Low;
while (CmpEnd < Name.size() && High - Low > 0) {
  size_t CmpStart = CmpEnd;
  CmpEnd = Name.find('.', CmpStart + 1);
  CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
  auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
    return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
  };
  LastLow = Low;
  std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
}
if (High - Low > 0)
  LastLow = Low;

if (LastLow == NameTable.end())
  return -1;
StringRef NameFound = *LastLow;
if (Name == NameFound ||
    (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
  return LastLow - NameTable.begin();
return -1;
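The loop narrows the candidate range one dotted component at a time, which is how overloaded names such as llvm.memcpy.p0i8.p0i8.i64 resolve to their base table entry. A standalone sketch of the same idea over a toy table (lookupByDottedName and the example strings are illustrative, not LLVM API):

#include <algorithm>
#include <cstring>
#include <string>
#include <tuple>
#include <vector>

// Successive binary searches over dot-separated name components, mirroring
// the loop above on a plain std::vector of sorted C strings.
static int lookupByDottedName(const std::vector<const char *> &Table,
                              const std::string &Name) {
  size_t CmpEnd = 4; // skip the leading "llvm" component
  auto Low = Table.begin(), High = Table.end(), LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    if (CmpEnd == std::string::npos)
      CmpEnd = Name.size();
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return std::strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.c_str(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;
  if (LastLow == Table.end())
    return -1;
  std::string Found = *LastLow;
  // Accept an exact match or a match on a full dotted prefix.
  if (Name == Found || Name.rfind(Found + ".", 0) == 0)
    return static_cast<int>(LastLow - Table.begin());
  return -1;
}

// With Table = {"llvm.memcpy", "llvm.memmove", "llvm.memset"},
// lookupByDottedName(Table, "llvm.memcpy.p0i8.p0i8.i64") returns 0.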
// ConstrainedFPIntrinsic::getRoundingMode(): MDString in the second-to-last argument.
Metadata *MD =
    cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
if (!MD || !isa<MDString>(MD))
  return None;
// ConstrainedFPIntrinsic::getExceptionBehavior(): MDString in the last argument.
Metadata *MD =
    cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
if (!MD || !isa<MDString>(MD))
  return None;
// ConstrainedFPCmpIntrinsic::getPredicate() applies the same check before a StringSwitch.
if (!MD || !isa<MDString>(MD))
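As a usage sketch, an optimization that is only valid under the default floating-point environment could combine the two accessors above; the helper name inDefaultFPEnvironment is hypothetical, while RoundingMode and fp::ExceptionBehavior are the enums these accessors return:

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// True when the constrained call specifies round-to-nearest and ignored FP
// exceptions, i.e. behavior equivalent to the default environment.
static bool inDefaultFPEnvironment(const ConstrainedFPIntrinsic &CFP) {
  Optional<RoundingMode> RM = CFP.getRoundingMode();
  Optional<fp::ExceptionBehavior> EB = CFP.getExceptionBehavior();
  return RM && *RM == RoundingMode::NearestTiesToEven && EB &&
         *EB == fp::ebIgnore;
}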
// ConstrainedFPIntrinsic::isUnaryOp(): every entry in ConstrainedOps.def
// expands to a case, and the declared operand count is tested.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"

// ConstrainedFPIntrinsic::isTernaryOp() uses the same expansion with NARG == 3.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"

// ConstrainedFPIntrinsic::classof(): any intrinsic listed in
// ConstrainedOps.def is a constrained FP intrinsic.
switch (I->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
  return true;
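The .def includes above are the usual LLVM X-macro pattern: the client defines INSTRUCTION (or the VP_* macros), includes the .def file, and the preprocessor stamps out one case per entry. A self-contained toy version of the pattern (FOR_EACH_SHAPE and the shape entries are made up for illustration; in LLVM the entries live in the .def file, which also #undefs the macros itself):

#include <cstdio>

// Stand-in for a .def file: one entry per item, consumed by whatever macro
// the including code defines.
#define FOR_EACH_SHAPE(SHAPE)                                                  \
  SHAPE(Circle, 0)                                                             \
  SHAPE(Triangle, 3)                                                           \
  SHAPE(Square, 4)

enum class ShapeKind {
#define SHAPE(NAME, SIDES) NAME,
  FOR_EACH_SHAPE(SHAPE)
#undef SHAPE
};

static int numSides(ShapeKind K) {
  switch (K) {
#define SHAPE(NAME, SIDES)                                                     \
  case ShapeKind::NAME:                                                        \
    return SIDES;
    FOR_EACH_SHAPE(SHAPE)
#undef SHAPE
  }
  return -1;
}

int main() { std::printf("%d\n", numSides(ShapeKind::Square)); } // prints 4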
// VPIntrinsic::getStaticVectorLength(): the static element count is taken
// from the mask parameter's vector type (via a local helper lambda).
auto VT = cast<VectorType>(T);
auto ElemCount = VT->getElementCount();
// ...
return GetVectorLengthOfType(VPMask->getType());
// VPIntrinsic::GetMaskParamPos(Intrinsic::ID IntrinsicID)
switch (IntrinsicID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
}

// VPIntrinsic::GetVectorLengthParamPos(Intrinsic::ID IntrinsicID)
switch (IntrinsicID) {
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
}

// VPIntrinsic::IsVPIntrinsic(Intrinsic::ID): true iff the ID is registered in
// VPIntrinsics.def.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    break;
#include "llvm/IR/VPIntrinsics.def"
// VPIntrinsic::GetFunctionalOpcodeForVP(Intrinsic::ID): map a VP intrinsic to
// its unpredicated functional IR opcode.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define HANDLE_VP_TO_OPC(OPC) FunctionalOC = Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(...) break;
#include "llvm/IR/VPIntrinsics.def"

// VPIntrinsic::GetForOpcode(unsigned OC): the reverse mapping, from an IR
// opcode to the corresponding VP intrinsic ID.
#define HANDLE_VP_TO_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
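A small usage sketch of the two mappings, assuming the static helpers shown above (roundTripsThroughOpcode is an illustrative name): a VP intrinsic that registers a functional opcode in VPIntrinsics.def should map back to itself.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// True when the VP intrinsic has a functional IR opcode and the reverse
// mapping agrees, e.g. vp.add <-> Instruction::Add.
static bool roundTripsThroughOpcode(Intrinsic::ID VPID) {
  unsigned OC = VPIntrinsic::GetFunctionalOpcodeForVP(VPID);
  return VPIntrinsic::GetForOpcode(OC) == VPID;
}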
// VPIntrinsic::canIgnoreVectorLengthParam() (excerpt)
using namespace PatternMatch;

// Scalable vectors: the explicit vector length can be ignored when it is
// provably C * vscale with C >= the known minimum element count.
if (EC.isScalable()) {
  const auto &DL = ParMod->getDataLayout();
  uint64_t VScaleFactor;
  if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
    return VScaleFactor >= EC.getKnownMinValue();
}

// Fixed-width vectors: a constant vector-length operand that covers every
// lane can be ignored as well.
auto VLConst = dyn_cast<ConstantInt>(VLParam);
uint64_t VLNum = VLConst->getZExtValue();
if (VLNum >= EC.getKnownMinValue())
  return true;
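The two paths above reduce to one rule: the explicit vector length is ignorable when it statically covers every lane. A standalone toy restatement of that rule (ToyEVL and canIgnoreEVL are illustrative, not LLVM types):

#include <cstdint>
#include <optional>

// A constant EVL must be >= the element count; for scalable vectors an EVL of
// the form C * vscale must have C >= the known minimum element count.
struct ToyEVL {
  std::optional<uint64_t> Constant;     // set when %evl is a plain constant
  std::optional<uint64_t> VScaleFactor; // set when %evl matches C * vscale
};

static bool canIgnoreEVL(const ToyEVL &EVL, uint64_t MinElems, bool Scalable) {
  if (Scalable)
    return EVL.VScaleFactor && *EVL.VScaleFactor >= MinElems;
  return EVL.Constant && *EVL.Constant >= MinElems;
}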
// BinaryOpIntrinsic::getBinaryOp(): map the overflow / saturating intrinsics
// to their underlying IR opcode.
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_sat:
case Intrinsic::sadd_sat:
  return Instruction::Add;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_sat:
case Intrinsic::ssub_sat:
  return Instruction::Sub;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
  return Instruction::Mul;

// BinaryOpIntrinsic::isSigned(): the signed variants of the same intrinsics.
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat:
  return true;
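A brief usage sketch of these accessors, as an analysis might use them; BinOpSummary and summarize are hypothetical names:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Collapse a *_with_overflow / *_sat call into (opcode, signedness, nowrap).
struct BinOpSummary {
  Instruction::BinaryOps Op;
  bool Signed;
  unsigned NoWrapKind;
};

static BinOpSummary summarize(const BinaryOpIntrinsic &BOI) {
  return {BOI.getBinaryOp(), BOI.isSigned(), BOI.getNoWrapKind()};
}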
// getStatepoint() (used by gc.relocate / gc.result): the token operand is
// either the statepoint itself or, on the exceptional path of an invoke, the
// landing pad whose unique predecessor is terminated by the statepoint.
if (!isa<LandingPadInst>(Token))
  return cast<GCStatepointInst>(Token);
const BasicBlock *InvokeBB =
    cast<Instruction>(Token)->getParent()->getUniquePredecessor();
assert(InvokeBB && "safepoints should have unique landingpads");
assert(InvokeBB->getTerminator() && "safepoint block should be well formed");

// GCRelocateInst::getDerivedPtr(): prefer the "gc-live" operand bundle and
// fall back to the statepoint's argument list.
if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
  return *(Opt->Inputs.begin() + getDerivedPtrIndex());
return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
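A usage sketch tying these accessors together (collectBaseDerivedPairs is a hypothetical helper; getGCRelocates is declared on GCStatepointInst in llvm/IR/Statepoint.h):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Statepoint.h"
#include <utility>
using namespace llvm;

// Pair each relocated derived pointer with its base pointer.
static void collectBaseDerivedPairs(
    const GCStatepointInst &SP,
    SmallVectorImpl<std::pair<Value *, Value *>> &Pairs) {
  for (const GCRelocateInst *Reloc : SP.getGCRelocates())
    Pairs.emplace_back(Reloc->getBasePtr(), Reloc->getDerivedPtr());
}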
StringSwitch & Case(StringLiteral S, T Value)
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
DIExpression * getExpression() const
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_NODISCARD R Default(T Value)
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static constexpr size_t npos
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
static bool classof(const IntrinsicInst *I)
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
bool canIgnoreVectorLengthParam() const
The instances of the Type class are immutable: once they are created, they are never changed.
int lookupLLVMIntrinsicByName(ArrayRef< const char * > NameTable, StringRef Name)
Looks up Name in NameTable via binary search.
DILocalVariable * getVariable() const
static bool classof(const IntrinsicInst *I)
unsigned getNumArgOperands() const
VScaleVal_match m_VScale(const DataLayout &DL)
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Value * getVariableLocationOp(unsigned OpIdx) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
LLVM Basic Block Representation.
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Represents a gc.statepoint intrinsic call.
bool match(Val *V, const Pattern &P)
static Optional< int > GetVectorLengthParamPos(Intrinsic::ID IntrinsicID)
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
static Intrinsic::ID GetForOpcode(unsigned OC)
The llvm.vp.* intrinsic corresponding to this IR instruction opcode.
unsigned getNumVariableLocationOps() const
Value * getVectorLengthParam() const
static unsigned GetFunctionalOpcodeForVP(Intrinsic::ID ID)
ElementCount getStaticVectorLength() const
static Optional< int > GetMaskParamPos(Intrinsic::ID IntrinsicID)
bool isSigned() const
Whether the intrinsic is signed or unsigned.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Value * getDerivedPtr() const
This is an important class for using LLVM in a threaded context.
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Optional< fp::ExceptionBehavior > StrToExceptionBehavior(StringRef)
Returns a valid ExceptionBehavior enumerator when given a string valid as input in constrained intrin...
static bool IsVPIntrinsic(Intrinsic::ID)
A Module instance is used to store all the information related to an LLVM module.
FCmpInst::Predicate getPredicate() const
Optional< fp::ExceptionBehavior > getExceptionBehavior() const
StringRef - Represent a constant reference to a string, i.e.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Metadata * getRawLocation() const
LLVMContext & getContext() const
All values hold a context through their type.
Optional< uint64_t > getFragmentSizeInBits() const
Get the size (in bits) of the variable, or fragment of the variable that is described.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static ValueAsMetadata * getAsMetadata(Value *V)
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Value * getBasePtr() const
static IntegerType * getInt64Ty(LLVMContext &C)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
A wrapper class for inspecting calls to intrinsic functions.
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Value * getArgOperand(unsigned i) const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A range adaptor for a pair of iterators.
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Optional< RoundingMode > getRoundingMode() const
A switch()-like statement whose cases are string literals.
Value * getMaskParam() const
LLVM_NODISCARD size_t size() const
size - Get the string size.
LLVM Value Representation.
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
const GCStatepointInst * getStatepoint() const
The statepoint with which this gc.relocate is associated.
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Optional< RoundingMode > StrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...