69 "jump-is-expensive",
cl::init(
false),
70 cl::desc(
"Do not create extra branches to split comparison logic."),
75 cl::desc(
"Set minimum number of entries to use a jump table."));
79 cl::desc(
"Set maximum size of jump tables."));
84 cl::desc(
"Minimum density for building a jump table in "
85 "a normal function"));
90 cl::desc(
"Minimum density for building a jump table in "
91 "an optsize function"));
98 cl::desc(
"Don't mutate strict-float node to a legalize node"),
102    assert(TT.isOSDarwin() && "should be called with darwin triple");
108      return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
111    return !TT.isOSVersionLT(7, 0);
116  void TargetLoweringBase::InitLibcalls(const Triple &TT) {
117  #define HANDLE_LIBCALL(code, name) \
118    setLibcallName(RTLIB::code, name);
119  #include "llvm/IR/RuntimeLibcalls.def"
120  #undef HANDLE_LIBCALL
122    for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
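The HANDLE_LIBCALL block above is an X-macro: including RuntimeLibcalls.def stamps out one setLibcallName call per runtime routine. A hedged sketch of what the expansion amounts to, showing two representative entries only (the .def file supplies the full list):

  // Illustrative expansion of the #include above -- not literal file contents.
  setLibcallName(RTLIB::SHL_I16, "__ashlhi3"); // 16-bit shift-left helper
  setLibcallName(RTLIB::MEMCPY, "memcpy");     // memcpy keeps its libc name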
158    if (TT.isOSDarwin()) {
166      switch (TT.getArch()) {
169        if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
183    if (TT.isWatchABI()) {
195    if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
196        (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
209    if (TT.isOSOpenBSD()) {
228 RTLIB::UNKNOWN_LIBCALL;
236 return FPEXT_F16_F32;
238 return FPEXT_F16_F64;
240 return FPEXT_F16_F80;
242 return FPEXT_F16_F128;
245 return FPEXT_F32_F64;
247 return FPEXT_F32_F128;
249 return FPEXT_F32_PPCF128;
252 return FPEXT_F64_F128;
254 return FPEXT_F64_PPCF128;
257 return FPEXT_F80_F128;
260 return UNKNOWN_LIBCALL;
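Legalization code reaches this table through RTLIB::getFPEXT. A minimal sketch of such a lookup, assuming the classic header layout (paths have moved between LLVM releases) and a wrapper function invented here purely for illustration:

  #include "llvm/CodeGen/RuntimeLibcalls.h"
  #include "llvm/CodeGen/ValueTypes.h"
  #include <cassert>

  // Sketch only: f32 -> f64, for example, resolves to FPEXT_F32_F64 (__extendsfdf2).
  static llvm::RTLIB::Libcall pickFPExtCall(llvm::EVT From, llvm::EVT To) {
    llvm::RTLIB::Libcall LC = llvm::RTLIB::getFPEXT(From, To);
    assert(LC != llvm::RTLIB::UNKNOWN_LIBCALL && "unsupported extension pair");
    return LC;
  }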
268 return FPROUND_F32_F16;
270 return FPROUND_F64_F16;
272 return FPROUND_F80_F16;
274 return FPROUND_F128_F16;
276 return FPROUND_PPCF128_F16;
279 return FPROUND_F32_BF16;
281 return FPROUND_F64_BF16;
284 return FPROUND_F64_F32;
286 return FPROUND_F80_F32;
288 return FPROUND_F128_F32;
290 return FPROUND_PPCF128_F32;
293 return FPROUND_F80_F64;
295 return FPROUND_F128_F64;
297 return FPROUND_PPCF128_F64;
300 return FPROUND_F128_F80;
303 return UNKNOWN_LIBCALL;
311 return FPTOSINT_F16_I32;
313 return FPTOSINT_F16_I64;
315 return FPTOSINT_F16_I128;
318 return FPTOSINT_F32_I32;
320 return FPTOSINT_F32_I64;
322 return FPTOSINT_F32_I128;
325 return FPTOSINT_F64_I32;
327 return FPTOSINT_F64_I64;
329 return FPTOSINT_F64_I128;
332 return FPTOSINT_F80_I32;
334 return FPTOSINT_F80_I64;
336 return FPTOSINT_F80_I128;
339 return FPTOSINT_F128_I32;
341 return FPTOSINT_F128_I64;
343 return FPTOSINT_F128_I128;
346 return FPTOSINT_PPCF128_I32;
348 return FPTOSINT_PPCF128_I64;
350 return FPTOSINT_PPCF128_I128;
352 return UNKNOWN_LIBCALL;
360 return FPTOUINT_F16_I32;
362 return FPTOUINT_F16_I64;
364 return FPTOUINT_F16_I128;
367 return FPTOUINT_F32_I32;
369 return FPTOUINT_F32_I64;
371 return FPTOUINT_F32_I128;
374 return FPTOUINT_F64_I32;
376 return FPTOUINT_F64_I64;
378 return FPTOUINT_F64_I128;
381 return FPTOUINT_F80_I32;
383 return FPTOUINT_F80_I64;
385 return FPTOUINT_F80_I128;
388 return FPTOUINT_F128_I32;
390 return FPTOUINT_F128_I64;
392 return FPTOUINT_F128_I128;
395 return FPTOUINT_PPCF128_I32;
397 return FPTOUINT_PPCF128_I64;
399 return FPTOUINT_PPCF128_I128;
401 return UNKNOWN_LIBCALL;
409 return SINTTOFP_I32_F16;
411 return SINTTOFP_I32_F32;
413 return SINTTOFP_I32_F64;
415 return SINTTOFP_I32_F80;
417 return SINTTOFP_I32_F128;
419 return SINTTOFP_I32_PPCF128;
422 return SINTTOFP_I64_F16;
424 return SINTTOFP_I64_F32;
426 return SINTTOFP_I64_F64;
428 return SINTTOFP_I64_F80;
430 return SINTTOFP_I64_F128;
432 return SINTTOFP_I64_PPCF128;
435 return SINTTOFP_I128_F16;
437 return SINTTOFP_I128_F32;
439 return SINTTOFP_I128_F64;
441 return SINTTOFP_I128_F80;
443 return SINTTOFP_I128_F128;
445 return SINTTOFP_I128_PPCF128;
447 return UNKNOWN_LIBCALL;
455 return UINTTOFP_I32_F16;
457 return UINTTOFP_I32_F32;
459 return UINTTOFP_I32_F64;
461 return UINTTOFP_I32_F80;
463 return UINTTOFP_I32_F128;
465 return UINTTOFP_I32_PPCF128;
468 return UINTTOFP_I64_F16;
470 return UINTTOFP_I64_F32;
472 return UINTTOFP_I64_F64;
474 return UINTTOFP_I64_F80;
476 return UINTTOFP_I64_F128;
478 return UINTTOFP_I64_PPCF128;
481 return UINTTOFP_I128_F16;
483 return UINTTOFP_I128_F32;
485 return UINTTOFP_I128_F64;
487 return UINTTOFP_I128_F80;
489 return UINTTOFP_I128_F128;
491 return UINTTOFP_I128_PPCF128;
493 return UNKNOWN_LIBCALL;
497 return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
503 unsigned ModeN, ModelN;
521 return UNKNOWN_LIBCALL;
539 return UNKNOWN_LIBCALL;
542 #define LCALLS(A, B) \
543 { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
545 LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
549 return LC[ModeN][ModelN];
553 return LC[ModeN][ModelN];
557 return LC[ModeN][ModelN];
561 return LC[ModeN][ModelN];
565 return LC[ModeN][ModelN];
569 return LC[ModeN][ModelN];
572 return UNKNOWN_LIBCALL;
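The ModeN/ModelN pair computed above indexes these per-size, per-ordering tables. A hedged sketch of a lookup (a `using namespace llvm;` context is assumed, and the fallback shown is only the customary pattern, not code from this file):

  // Sketch: outline-atomics helper for a 32-bit acquire compare-and-swap.
  // On AArch64 with +outline-atomics this is expected to name __aarch64_cas4_acq.
  RTLIB::Libcall LC = RTLIB::getOUTLINE_ATOMIC(ISD::ATOMIC_CMP_SWAP,
                                               AtomicOrdering::Acquire, MVT::i32);
  if (LC == RTLIB::UNKNOWN_LIBCALL)
    LC = RTLIB::getSYNC(ISD::ATOMIC_CMP_SWAP, MVT::i32); // legacy __sync_* route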
579 #define OP_TO_LIBCALL(Name, Enum) \
581 switch (VT.SimpleTy) { \
583 return UNKNOWN_LIBCALL; \
613 return UNKNOWN_LIBCALL;
617 switch (ElementSize) {
619 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
621 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
623 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
625 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
627 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
629 return UNKNOWN_LIBCALL;
634 switch (ElementSize) {
636 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
638 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
640 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
642 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
644 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
646 return UNKNOWN_LIBCALL;
651 switch (ElementSize) {
653 return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
655 return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
657 return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
659 return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
661 return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
663 return UNKNOWN_LIBCALL;
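All three element-unordered-atomic switches key purely off the element size in bytes; anything other than 1, 2, 4, 8 or 16 yields UNKNOWN_LIBCALL. A small sketch of the lookup (an llvm:: context is assumed):

  // 4-byte elements map to __llvm_memcpy_element_unordered_atomic_4;
  // a 3-byte element size has no helper.
  RTLIB::Libcall Ok  = RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(4); // ..._ATOMIC_4
  RTLIB::Libcall Bad = RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(3); // UNKNOWN_LIBCALL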
710    HasMultipleConditionRegisters = false;
711    HasExtractBitsInsn = false;
715    StackPointerRegisterToSaveRestore = 0;
722    MaxBytesForAlignment = 0;
725    MaxAtomicSizeInBitsSupported = 1024;
731    MinCmpXchgSizeInBits = 0;
732    SupportsUnalignedAtomics = false;
742    memset(OpActions, 0, sizeof(OpActions));
743    memset(LoadExtActions, 0, sizeof(LoadExtActions));
744    memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
745    memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
746    memset(CondCodeActions, 0, sizeof(CondCodeActions));
749              std::end(TargetDAGCombineArray), 0);
756      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
857 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
858 setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
859 #include "llvm/IR/ConstrainedOps.def"
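initActions() installs conservative defaults, including Expand for every STRICT_* node via ConstrainedOps.def; a concrete backend then selectively overrides them in its own TargetLowering constructor. A minimal sketch of that pattern, with MyTargetLowering, MyTarget::GPR32RegClass and Subtarget all hypothetical:

  // Hypothetical target constructor -- not from this file, just the usual shape.
  MyTargetLowering::MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
    addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);    // declare legal types
    setOperationAction(ISD::SDIV, MVT::i32, Expand);         // no hardware divide
    setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);   // re-enable one strict node
    setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
    computeRegisterProperties(Subtarget.getRegisterInfo());  // then derive the tables
  }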
918                                            bool LegalTypes) const {
929 "ShiftVT is still too small!");
947                                                   unsigned DestAS) const {
954 JumpIsExpensive = isExpensive;
970 "Promote may not follow Expand or Promote");
986    assert(NVT != VT && "Unable to round integer VT");
1035 EVT OldEltVT = EltVT;
1074     if (LargerVector == MVT())
1098 unsigned &NumIntermediates,
1105 unsigned NumVectorRegs = 1;
1111 "Splitting or widening of non-power-of-2 MVTs is not implemented.");
1117 NumVectorRegs = EC.getKnownMinValue();
1124 while (EC.getKnownMinValue() > 1 &&
1126 EC = EC.divideCoefficientBy(2);
1127 NumVectorRegs <<= 1;
1130 NumIntermediates = NumVectorRegs;
1135 IntermediateVT = NewVT;
1144 RegisterVT = DestVT;
1145   if (EVT(DestVT).bitsLT(NewVT))
1150 return NumVectorRegs;
1193   for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
1200       unsigned TiedTo = i;
1202         TiedTo = MI->findTiedOperandIdx(i);
1219       assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
1220       MIB.addImm(StackMaps::IndirectMemRefOp);
1227       MIB.addImm(StackMaps::DirectMemRefOp);
1232   assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1239   if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
1248 MI->eraseFromParent();
1258 std::pair<const TargetRegisterClass *, uint8_t>
1263 return std::make_pair(RC, 0);
1272   for (unsigned i : SuperRegRC.set_bits()) {
1281 return std::make_pair(BestRC, 1);
1289 "Too many value types for ValueTypeActions to hold!");
1293     NumRegistersForVT[i] = 1;
1301   for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1302     assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1306   for (unsigned ExpandedReg = LargestIntReg + 1;
1308 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1317 unsigned LegalIntReg = LargestIntReg;
1318   for (unsigned IntReg = LargestIntReg - 1;
1319        IntReg >= (unsigned)MVT::i1; --IntReg) {
1322 LegalIntReg = IntReg;
1324 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1418     bool IsLegalWiderType = false;
1421 switch (PreferredAction) {
1428       for (unsigned nVT = i + 1;
1435           TransformToType[i] = SVT;
1436           RegisterTypeForVT[i] = SVT;
1437           NumRegistersForVT[i] = 1;
1439           IsLegalWiderType = true;
1443 if (IsLegalWiderType)
1456 EC.getKnownMinValue() &&
1458           TransformToType[i] = SVT;
1459           RegisterTypeForVT[i] = SVT;
1460           NumRegistersForVT[i] = 1;
1462           IsLegalWiderType = true;
1466 if (IsLegalWiderType)
1472       TransformToType[i] = NVT;
1474       RegisterTypeForVT[i] = NVT;
1475       NumRegistersForVT[i] = 1;
1485 unsigned NumIntermediates;
1487                                       NumIntermediates, RegisterVT, this);
1488       NumRegistersForVT[i] = NumRegisters;
1489       assert(NumRegistersForVT[i] == NumRegisters &&
1490              "NumRegistersForVT size cannot represent NumRegisters!");
1491       RegisterTypeForVT[i] = RegisterVT;
1501 else if (EC.getKnownMinValue() > 1)
1508         TransformToType[i] = NVT;
1527     RepRegClassForVT[i] = RRC;
1528     RepRegClassCostForVT[i] = Cost;
1551                                                     EVT VT, EVT &IntermediateVT,
1552                                                     unsigned &NumIntermediates,
1553                                                     MVT &RegisterVT) const {
1566 IntermediateVT = RegisterEVT;
1568 NumIntermediates = 1;
1576 unsigned NumVectorRegs = 1;
1591 "Don't know how to legalize this scalable vector type");
1597 IntermediateVT = PartVT;
1599 return NumIntermediates;
1614 NumVectorRegs <<= 1;
1617 NumIntermediates = NumVectorRegs;
1622 IntermediateVT = NewVT;
1625 RegisterVT = DestVT;
1627   if (EVT(DestVT).bitsLT(NewVT)) {
1637 return NumVectorRegs;
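A hedged example of what a caller sees from this breakdown; TLI and Ctx stand for a target's TargetLowering object and an LLVMContext, and the concrete split is entirely target dependent:

  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(Ctx, MVT::v16i64, IntermediateVT,
                                                NumIntermediates, RegisterVT);
  // On a target whose widest legal vector is v2i64, one would expect
  // NumIntermediates == 8, RegisterVT == MVT::v2i64 and NumRegs == 8.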
1650 const bool OptForSize =
1651 SI->getParent()->getParent()->hasOptSize() ||
1658 return (OptForSize || Range <= MaxJumpTableSize) &&
1659 (NumCases * 100 >= Range * MinDensity);
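For example, with Range = 100, NumCases = 12 and a 10% minimum density, 12 * 100 = 1200 >= 100 * 10 = 1000, so a jump table is considered worthwhile provided Range also stays within the maximum table size (or the function is being optimized for size).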
1663                                                          EVT ConditionVT) const {
1677 unsigned NumValues = ValueVTs.size();
1678   if (NumValues == 0) return;
1680   for (unsigned j = 0, f = NumValues; j != f; ++j) {
1681     EVT VT = ValueVTs[j];
1715     for (unsigned i = 0; i < NumParts; ++i)
1725 return DL.getABITypeAlign(Ty).value();
1737   if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
1739     if (Fast != nullptr)
1757                                                 unsigned AddrSpace, Align Alignment,
1759                                                 unsigned *Fast) const {
1767                                            unsigned *Fast) const {
1775                                                unsigned *Fast) const {
1786 enum InstructionOpcodes {
1787 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1788 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1789 #include "llvm/IR/Instruction.def"
1791   switch (static_cast<InstructionOpcodes>(Opcode)) {
1794   case Switch:         return 0;
1795   case IndirectBr:     return 0;
1796   case Invoke:         return 0;
1797   case CallBr:         return 0;
1798   case Resume:         return 0;
1799   case Unreachable:    return 0;
1800   case CleanupRet:     return 0;
1801   case CatchRet:       return 0;
1802   case CatchPad:       return 0;
1803   case CatchSwitch:    return 0;
1804   case CleanupPad:     return 0;
1824   case Alloca:         return 0;
1827   case GetElementPtr:  return 0;
1828   case Fence:          return 0;
1829   case AtomicCmpXchg:  return 0;
1830   case AtomicRMW:      return 0;
1847   case Call:           return 0;
1849   case UserOp1:        return 0;
1850   case UserOp2:        return 0;
1851   case VAArg:          return 0;
1857   case LandingPad:     return 0;
1866                                                              bool UseTLS) const {
1870   const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1871   auto UnsafeStackPtr =
1872       dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1876 if (!UnsafeStackPtr) {
1877 auto TLSModel = UseTLS ?
1885         UnsafeStackPtrVar, nullptr, TLSModel);
1888 if (UnsafeStackPtr->getValueType() != StackPtrTy)
1890 if (UseTLS != UnsafeStackPtr->isThreadLocal())
1892                        (UseTLS ? "" : "not ") + "be thread-local");
1894 return UnsafeStackPtr;
1906   FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
1962   Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
1973   if (!M.getNamedValue("__stack_chk_guard")) {
1976                                   "__stack_chk_guard");
1982       GV->setDSOLocal(true);
1989   return M.getNamedValue("__stack_chk_guard");
2023 return PrefLoopAlignment;
2028 return MaxBytesForAlignment;
2039   return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2047   std::string Name = VT.isVector() ? "vec-" : "";
2049   Name += IsSqrt ? "sqrt" : "div";
2058 "Unexpected FP type for reciprocal estimate");
2070   const char RefStepToken = ':';
2071   Position = In.find(RefStepToken);
2075   StringRef RefStepString = In.substr(Position + 1);
2078   if (RefStepString.size() == 1) {
2079 char RefStepChar = RefStepString[0];
2081       Value = RefStepChar - '0';
2092   if (Override.empty())
2093 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2096   Override.split(OverrideVector, ',');
2097 unsigned NumArgs = OverrideVector.size();
2107     Override = Override.substr(0, RefPos);
2111     if (Override == "all")
2115     if (Override == "none")
2119     if (Override == "default")
2120 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2125 std::string VTNameNoSize = VTName;
2126 VTNameNoSize.pop_back();
2127   static const char DisabledPrefix = '!';
2129   for (StringRef RecipType : OverrideVector) {
2133 RecipType = RecipType.substr(0, RefPos);
2136 bool IsDisabled = RecipType[0] == DisabledPrefix;
2138 RecipType = RecipType.substr(1);
2140 if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2145 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2152   if (Override.empty())
2153 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2156   Override.split(OverrideVector, ',');
2157 unsigned NumArgs = OverrideVector.size();
2166 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2169     Override = Override.substr(0, RefPos);
2170     assert(Override != "none" &&
2171            "Disabled reciprocals, but specifed refinement steps?");
2174     if (Override == "all" || Override == "default")
2180 std::string VTNameNoSize = VTName;
2181 VTNameNoSize.pop_back();
2183   for (StringRef RecipType : OverrideVector) {
2189 RecipType = RecipType.substr(0, RefPos);
2190 if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2194 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
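Taken together, getOpEnabled and getOpRefinementSteps parse the function's "reciprocal-estimates" attribute: a comma-separated list of operation names as built by getReciprocalOpName (an optional "vec-" prefix, then "sqrt" or "div", then a type suffix), each optionally prefixed with '!' to disable it and optionally followed by ':' and a single digit of refinement steps, with "all", "none" and "default" accepted as blanket settings. As an assumed example (not taken from this file), an attribute value along the lines of "!sqrtf,vec-divf:2" would disable scalar float square-root estimates while enabling vector float division estimates with two refinement steps.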
2259   if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2276   if (SI.isVolatile())
2279   if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2292   if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2293 if (RMW->isVolatile())
2296 if (CmpX->isVolatile())
2310 return Builder.CreateFence(Ord);
2319 return Builder.CreateFence(Ord);
2330   auto &MF = *MI.getMF();
2331   auto &MRI = MF.getRegInfo();
2338   auto maxUses = [](unsigned RematCost) {
2351   switch (MI.getOpcode()) {
2356 case TargetOpcode::G_CONSTANT:
2357 case TargetOpcode::G_FCONSTANT:
2358 case TargetOpcode::G_FRAME_INDEX:
2359 case TargetOpcode::G_INTTOPTR:
2361 case TargetOpcode::G_GLOBAL_VALUE: {
2364 unsigned MaxUses = maxUses(RematCost);
2365 if (MaxUses == UINT_MAX)