78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
113#define DEBUG_TYPE "isel"
121 cl::desc(
"Insert the experimental `assertalign` node."),
126 cl::desc(
"Generate low-precision inline sequences "
127 "for some float libcalls"),
133 cl::desc(
"Set the case probability threshold for peeling the case from a "
134 "switch statement. A value greater than 100 will void this "
154 const SDValue *Parts,
unsigned NumParts,
157 std::optional<CallingConv::ID> CC);
166 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
168 std::optional<CallingConv::ID> CC = std::nullopt,
169 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
173 PartVT, ValueVT, CC))
180 assert(NumParts > 0 &&
"No parts to assemble!");
191 unsigned RoundBits = PartBits * RoundParts;
192 EVT RoundVT = RoundBits == ValueBits ?
198 if (RoundParts > 2) {
202 PartVT, HalfVT, V, InChain);
204 Lo = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[0]);
205 Hi = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[1]);
213 if (RoundParts < NumParts) {
215 unsigned OddParts = NumParts - RoundParts;
218 OddVT, V, InChain, CC);
234 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
245 !PartVT.
isVector() &&
"Unexpected split");
257 if (PartEVT == ValueVT)
261 ValueVT.
bitsLT(PartEVT)) {
270 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
274 if (ValueVT.
bitsLT(PartEVT)) {
279 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
294 llvm::Attribute::StrictFP)) {
296 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
303 return DAG.
getNode(ISD::FP_EXTEND,
DL, ValueVT, Val);
308 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
309 ValueVT.
bitsLT(PartEVT)) {
310 Val = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, Val);
318 const Twine &ErrMsg) {
321 return Ctx.emitError(ErrMsg);
324 if (CI->isInlineAsm()) {
326 *CI, ErrMsg +
", possible invalid constraint for vector type"));
329 return Ctx.emitError(
I, ErrMsg);
338 const SDValue *Parts,
unsigned NumParts,
341 std::optional<CallingConv::ID> CallConv) {
343 assert(NumParts > 0 &&
"No parts to assemble!");
344 const bool IsABIRegCopy = CallConv.has_value();
353 unsigned NumIntermediates;
358 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
359 NumIntermediates, RegisterVT);
363 NumIntermediates, RegisterVT);
366 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
368 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
371 "Part type sizes don't match!");
375 if (NumIntermediates == NumParts) {
378 for (
unsigned i = 0; i != NumParts; ++i)
380 V, InChain, CallConv);
381 }
else if (NumParts > 0) {
384 assert(NumParts % NumIntermediates == 0 &&
385 "Must expand into a divisible number of parts!");
386 unsigned Factor = NumParts / NumIntermediates;
387 for (
unsigned i = 0; i != NumIntermediates; ++i)
389 IntermediateVT, V, InChain, CallConv);
404 DL, BuiltVectorTy,
Ops);
410 if (PartEVT == ValueVT)
416 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
426 "Cannot narrow, it would be a lossy transformation");
432 if (PartEVT == ValueVT)
435 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
439 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
450 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
456 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
457 }
else if (ValueVT.
bitsLT(PartEVT)) {
466 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
475 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueSVT, Val);
497 std::optional<CallingConv::ID> CallConv);
504 unsigned NumParts,
MVT PartVT,
const Value *V,
505 std::optional<CallingConv::ID> CallConv = std::nullopt,
519 unsigned OrigNumParts = NumParts;
521 "Copying to an illegal type!");
527 EVT PartEVT = PartVT;
528 if (PartEVT == ValueVT) {
529 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
538 assert(NumParts == 1 &&
"Do not know what to promote to!");
539 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
545 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
549 "Unknown mismatch!");
551 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
552 if (PartVT == MVT::x86mmx)
553 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
557 assert(NumParts == 1 && PartEVT != ValueVT);
558 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
563 "Unknown mismatch!");
566 if (PartVT == MVT::x86mmx)
567 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
573 "Failed to tile the value with PartVT!");
576 if (PartEVT != ValueVT) {
578 "scalar-to-vector conversion failed");
579 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
587 if (NumParts & (NumParts - 1)) {
590 "Do not know what to expand to!");
592 unsigned RoundBits = RoundParts * PartBits;
593 unsigned OddParts = NumParts - RoundParts;
602 std::reverse(Parts + RoundParts, Parts + NumParts);
604 NumParts = RoundParts;
616 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
617 for (
unsigned i = 0; i < NumParts; i += StepSize) {
618 unsigned ThisBits = StepSize * PartBits / 2;
621 SDValue &Part1 = Parts[i+StepSize/2];
628 if (ThisBits == PartBits && ThisVT != PartVT) {
629 Part0 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part0);
630 Part1 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part1);
636 std::reverse(Parts, Parts + OrigNumParts);
658 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
660 "Cannot widen to illegal type");
663 }
else if (PartEVT != ValueEVT) {
678 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
689 std::optional<CallingConv::ID> CallConv) {
693 const bool IsABIRegCopy = CallConv.has_value();
696 EVT PartEVT = PartVT;
697 if (PartEVT == ValueVT) {
701 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
736 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
743 "lossy conversion of vector to scalar type");
758 unsigned NumIntermediates;
762 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
767 NumIntermediates, RegisterVT);
770 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
772 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
775 "Mixing scalable and fixed vectors when copying in parts");
777 std::optional<ElementCount> DestEltCnt;
787 if (ValueVT == BuiltVectorTy) {
791 Val = DAG.
getNode(ISD::BITCAST,
DL, BuiltVectorTy, Val);
811 for (
unsigned i = 0; i != NumIntermediates; ++i) {
826 if (NumParts == NumIntermediates) {
829 for (
unsigned i = 0; i != NumParts; ++i)
831 }
else if (NumParts > 0) {
834 assert(NumIntermediates != 0 &&
"division by zero");
835 assert(NumParts % NumIntermediates == 0 &&
836 "Must expand into a divisible number of parts!");
837 unsigned Factor = NumParts / NumIntermediates;
838 for (
unsigned i = 0; i != NumIntermediates; ++i)
846 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
850 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
853 OS << LS << U.getTagName();
856 Twine(
"cannot lower ", Name)
862 EVT valuevt, std::optional<CallingConv::ID> CC)
868 std::optional<CallingConv::ID> CC) {
882 for (
unsigned i = 0; i != NumRegs; ++i)
883 Regs.push_back(Reg + i);
884 RegVTs.push_back(RegisterVT);
886 Reg = Reg.id() + NumRegs;
913 for (
unsigned i = 0; i != NumRegs; ++i) {
919 *Glue =
P.getValue(2);
922 Chain =
P.getValue(1);
950 EVT FromVT(MVT::Other);
954 }
else if (NumSignBits > 1) {
962 assert(FromVT != MVT::Other);
968 RegisterVT, ValueVT, V, Chain,
CallConv);
984 unsigned NumRegs =
Regs.size();
998 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1004 for (
unsigned i = 0; i != NumRegs; ++i) {
1016 if (NumRegs == 1 || Glue)
1027 Chain = Chains[NumRegs-1];
1033 unsigned MatchingIdx,
const SDLoc &dl,
1035 std::vector<SDValue> &
Ops)
const {
1040 Flag.setMatchingOp(MatchingIdx);
1041 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1049 Flag.setRegClass(RC->
getID());
1060 "No 1:1 mapping from clobbers to regs?");
1063 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1068 "If we clobbered the stack pointer, MFI should know about it.");
1077 for (
unsigned i = 0; i != NumRegs; ++i) {
1078 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1090 unsigned RegCount = std::get<0>(CountAndVT);
1091 MVT RegisterVT = std::get<1>(CountAndVT);
1109 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1111 *
DAG.getMachineFunction().getFunction().getParent());
1116 UnusedArgNodeMap.clear();
1118 PendingExports.clear();
1119 PendingConstrainedFP.clear();
1120 PendingConstrainedFPStrict.clear();
1128 DanglingDebugInfoMap.clear();
1135 if (Pending.
empty())
1141 unsigned i = 0, e = Pending.
size();
1142 for (; i != e; ++i) {
1144 if (Pending[i].
getNode()->getOperand(0) == Root)
1152 if (Pending.
size() == 1)
1179 if (!PendingConstrainedFPStrict.empty()) {
1180 assert(PendingConstrainedFP.empty());
1181 updateRoot(PendingConstrainedFPStrict);
1194 if (!PendingConstrainedFP.empty()) {
1195 assert(PendingConstrainedFPStrict.empty());
1196 updateRoot(PendingConstrainedFP);
1200 return DAG.getRoot();
1208 PendingConstrainedFP.size() +
1209 PendingConstrainedFPStrict.size());
1211 PendingConstrainedFP.end());
1212 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1213 PendingConstrainedFPStrict.end());
1214 PendingConstrainedFP.clear();
1215 PendingConstrainedFPStrict.clear();
1222 PendingExports.append(PendingConstrainedFPStrict.begin(),
1223 PendingConstrainedFPStrict.end());
1224 PendingConstrainedFPStrict.clear();
1225 return updateRoot(PendingExports);
1232 assert(Variable &&
"Missing variable");
1239 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1255 if (IsParameter && FINode) {
1257 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1258 true,
DL, SDNodeOrder);
1263 FuncArgumentDbgValueKind::Declare,
N);
1266 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1267 true,
DL, SDNodeOrder);
1269 DAG.AddDbgValue(SDV, IsParameter);
1274 FuncArgumentDbgValueKind::Declare,
N)) {
1276 <<
" (could not emit func-arg dbg_value)\n");
1287 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1289 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1291 if (It->Values.isKillLocation(It->Expr)) {
1297 It->Values.hasArgList())) {
1300 FnVarLocs->getDILocalVariable(It->VariableID),
1301 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1314 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1317 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1319 assert(DLR->getLabel() &&
"Missing label");
1321 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1322 DAG.AddDbgLabel(SDV);
1326 if (SkipDbgVariableRecords)
1334 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1336 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1345 if (Values.
empty()) {
1362 SDNodeOrder, IsVariadic)) {
1373 if (
I.isTerminator()) {
1374 HandlePHINodesInSuccessorBlocks(
I.getParent());
1381 bool NodeInserted =
false;
1382 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1383 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1384 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1385 if (PCSectionsMD || MMRA) {
1386 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1387 DAG, [&](
SDNode *) { NodeInserted =
true; });
1397 if (PCSectionsMD || MMRA) {
1398 auto It = NodeMap.find(&
I);
1399 if (It != NodeMap.end()) {
1401 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1403 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1404 }
else if (NodeInserted) {
1407 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1408 <<
I.getModule()->getName() <<
"]\n";
1417void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1427#define HANDLE_INST(NUM, OPCODE, CLASS) \
1428 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1429#include "llvm/IR/Instruction.def"
1441 for (
const Value *V : Values) {
1466 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1471 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1472 DIVariable *DanglingVariable = DDI.getVariable();
1474 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1476 << printDDI(
nullptr, DDI) <<
"\n");
1482 for (
auto &DDIMI : DanglingDebugInfoMap) {
1483 DanglingDebugInfoVector &DDIV = DDIMI.second;
1487 for (
auto &DDI : DDIV)
1488 if (isMatchingDbgValue(DDI))
1491 erase_if(DDIV, isMatchingDbgValue);
1499 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1500 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1503 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1504 for (
auto &DDI : DDIV) {
1507 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1510 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1511 "Expected inlined-at fields to agree");
1520 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1521 FuncArgumentDbgValueKind::Value, Val)) {
1523 << printDDI(V, DDI) <<
"\n");
1530 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1531 << ValSDNodeOrder <<
"\n");
1532 SDV = getDbgValue(Val, Variable, Expr,
DL,
1533 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1534 DAG.AddDbgValue(SDV,
false);
1538 <<
" in EmitFuncArgumentDbgValue\n");
1540 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1544 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1545 DAG.AddDbgValue(SDV,
false);
1552 DanglingDebugInfo &DDI) {
1557 const Value *OrigV = V;
1561 unsigned SDOrder = DDI.getSDNodeOrder();
1565 bool StackValue =
true;
1590 if (!AdditionalValues.
empty())
1600 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1601 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1609 assert(OrigV &&
"V shouldn't be null");
1611 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1612 DAG.AddDbgValue(SDV,
false);
1614 << printDDI(OrigV, DDI) <<
"\n");
1631 unsigned Order,
bool IsVariadic) {
1636 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1641 for (
const Value *V : Values) {
1651 if (CE->getOpcode() == Instruction::IntToPtr) {
1670 N = UnusedArgNodeMap[V];
1675 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1676 FuncArgumentDbgValueKind::Value,
N))
1703 bool IsParamOfFunc =
1711 auto VMI =
FuncInfo.ValueMap.find(V);
1712 if (VMI !=
FuncInfo.ValueMap.end()) {
1717 V->getType(), std::nullopt);
1723 unsigned BitsToDescribe = 0;
1725 BitsToDescribe = *VarSize;
1727 BitsToDescribe = Fragment->SizeInBits;
1730 if (
Offset >= BitsToDescribe)
1733 unsigned RegisterSize = RegAndSize.second;
1734 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1735 ? BitsToDescribe -
Offset
1738 Expr,
Offset, FragmentSize);
1742 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1743 DAG.AddDbgValue(SDV,
false);
1759 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1760 false, DbgLoc, Order, IsVariadic);
1761 DAG.AddDbgValue(SDV,
false);
1767 for (
auto &Pair : DanglingDebugInfoMap)
1768 for (
auto &DDI : Pair.second)
1779 if (It !=
FuncInfo.ValueMap.end()) {
1783 DAG.getDataLayout(), InReg, Ty,
1800 if (
N.getNode())
return N;
1860 return DAG.getSplatBuildVector(
1863 return DAG.getConstant(*CI,
DL, VT);
1872 getValue(CPA->getAddrDiscriminator()),
1873 getValue(CPA->getDiscriminator()));
1889 visit(CE->getOpcode(), *CE);
1891 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1897 for (
const Use &U :
C->operands()) {
1903 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1904 Constants.push_back(
SDValue(Val, i));
1913 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1917 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1926 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1928 "Unknown struct or array constant!");
1932 unsigned NumElts = ValueVTs.
size();
1936 for (
unsigned i = 0; i != NumElts; ++i) {
1937 EVT EltVT = ValueVTs[i];
1939 Constants[i] =
DAG.getUNDEF(EltVT);
1950 return DAG.getBlockAddress(BA, VT);
1953 return getValue(Equiv->getGlobalValue());
1958 if (VT == MVT::aarch64svcount) {
1959 assert(
C->isNullValue() &&
"Can only zero this target type!");
1965 assert(
C->isNullValue() &&
"Can only zero this target type!");
1982 for (
unsigned i = 0; i != NumElements; ++i)
2010 return DAG.getFrameIndex(
2018 std::optional<CallingConv::ID> CallConv;
2020 if (CB && !CB->isInlineAsm())
2021 CallConv = CB->getCallingConv();
2024 Inst->getType(), CallConv);
2038void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2051 if (IsMSVCCXX || IsCoreCLR)
2057 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2058 FuncInfo.MBB->addSuccessor(TargetMBB);
2065 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2074 DAG.getMachineFunction().setHasEHContTarget(
true);
2080 Value *ParentPad =
I.getCatchSwitchParentPad();
2083 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2086 assert(SuccessorColor &&
"No parent funclet for catchret!");
2087 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2088 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2093 DAG.getBasicBlock(SuccessorColorMBB));
2097void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2103 FuncInfo.MBB->setIsEHFuncletEntry();
2104 FuncInfo.MBB->setIsCleanupFuncletEntry();
2133 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2139 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2140 UnwindDests.back().first->setIsEHScopeEntry();
2143 UnwindDests.back().first->setIsEHFuncletEntry();
2147 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2148 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2150 if (IsMSVCCXX || IsCoreCLR)
2151 UnwindDests.back().first->setIsEHFuncletEntry();
2153 UnwindDests.back().first->setIsEHScopeEntry();
2155 NewEHPadBB = CatchSwitch->getUnwindDest();
2161 if (BPI && NewEHPadBB)
2163 EHPadBB = NewEHPadBB;
2170 auto UnwindDest =
I.getUnwindDest();
2171 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2172 BranchProbability UnwindDestProb =
2177 for (
auto &UnwindDest : UnwindDests) {
2178 UnwindDest.first->setIsEHPad();
2179 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2181 FuncInfo.MBB->normalizeSuccProbs();
2184 MachineBasicBlock *CleanupPadMBB =
2185 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2191void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2195void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2196 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2197 auto &
DL =
DAG.getDataLayout();
2209 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2226 SmallVector<uint64_t, 4>
Offsets;
2229 unsigned NumValues = ValueVTs.
size();
2232 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2233 for (
unsigned i = 0; i != NumValues; ++i) {
2240 if (MemVTs[i] != ValueVTs[i])
2242 Chains[i] =
DAG.getStore(
2250 MVT::Other, Chains);
2251 }
else if (
I.getNumOperands() != 0) {
2254 unsigned NumValues =
Types.size();
2258 const Function *
F =
I.getParent()->getParent();
2261 I.getOperand(0)->getType(),
F->getCallingConv(),
2265 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2267 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2270 LLVMContext &
Context =
F->getContext();
2271 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2273 for (
unsigned j = 0;
j != NumValues; ++
j) {
2286 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2289 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2293 if (
I.getOperand(0)->getType()->isPointerTy()) {
2295 Flags.setPointerAddrSpace(
2299 if (NeedsRegBlock) {
2300 Flags.setInConsecutiveRegs();
2301 if (j == NumValues - 1)
2302 Flags.setInConsecutiveRegsLast();
2310 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2313 for (
unsigned i = 0; i < NumParts; ++i) {
2316 VT, Types[j], 0, 0));
2326 const Function *
F =
I.getParent()->getParent();
2328 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2330 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2331 Flags.setSwiftError();
2343 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2345 DAG.getMachineFunction().getFunction().getCallingConv();
2346 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2351 "LowerReturn didn't return a valid chain!");
2362 if (V->getType()->isEmptyTy())
2366 if (VMI !=
FuncInfo.ValueMap.end()) {
2368 "Unused value assigned virtual registers!");
2381 if (
FuncInfo.isExportedInst(V))
return;
2393 if (VI->getParent() == FromBB)
2419 const BasicBlock *SrcBB = Src->getBasicBlock();
2420 const BasicBlock *DstBB = Dst->getBasicBlock();
2424 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2434 Src->addSuccessorWithoutProb(Dst);
2437 Prob = getEdgeProbability(Src, Dst);
2438 Src->addSuccessor(Dst, Prob);
2444 return I->getParent() == BB;
2468 if (CurBB == SwitchBB ||
2474 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2479 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2481 if (TM.Options.NoNaNsFPMath)
2485 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2487 SL->SwitchCases.push_back(CB);
2496 SL->SwitchCases.push_back(CB);
2504 unsigned Depth = 0) {
2513 if (Necessary !=
nullptr) {
2516 if (Necessary->contains(
I))
2535 if (
I.getNumSuccessors() != 2)
2538 if (!
I.isConditional())
2550 if (BPI !=
nullptr) {
2556 std::optional<bool> Likely;
2559 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2563 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2575 if (CostThresh <= 0)
2596 Value *BrCond =
I.getCondition();
2597 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2598 for (
const auto *U : Ins->users()) {
2601 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2614 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2616 for (
const auto &InsPair : RhsDeps) {
2617 if (!ShouldCountInsn(InsPair.first)) {
2618 ToDrop = InsPair.first;
2622 if (ToDrop ==
nullptr)
2624 RhsDeps.erase(ToDrop);
2627 for (
const auto &InsPair : RhsDeps) {
2632 CostOfIncluding +=
TTI->getInstructionCost(
2635 if (CostOfIncluding > CostThresh)
2661 const Value *BOpOp0, *BOpOp1;
2675 if (BOpc == Instruction::And)
2676 BOpc = Instruction::Or;
2677 else if (BOpc == Instruction::Or)
2678 BOpc = Instruction::And;
2684 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2689 TProb, FProb, InvertCond);
2699 if (
Opc == Instruction::Or) {
2720 auto NewTrueProb = TProb / 2;
2721 auto NewFalseProb = TProb / 2 + FProb;
2724 NewFalseProb, InvertCond);
2731 Probs[1], InvertCond);
2733 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2753 auto NewTrueProb = TProb + FProb / 2;
2754 auto NewFalseProb = FProb / 2;
2757 NewFalseProb, InvertCond);
2764 Probs[1], InvertCond);
2773 if (Cases.size() != 2)
return true;
2777 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2778 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2779 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2780 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2786 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2787 Cases[0].CC == Cases[1].CC &&
2790 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2792 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2799void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2805 if (
I.isUnconditional()) {
2811 if (Succ0MBB != NextBlock(BrMBB) ||
2824 const Value *CondVal =
I.getCondition();
2825 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2844 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2846 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2849 const Value *BOp0, *BOp1;
2852 Opcode = Instruction::And;
2854 Opcode = Instruction::Or;
2861 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2862 Opcode, BOp0, BOp1))) {
2864 getEdgeProbability(BrMBB, Succ0MBB),
2865 getEdgeProbability(BrMBB, Succ1MBB),
2870 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2874 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2881 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2887 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2888 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2890 SL->SwitchCases.clear();
2896 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2917 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2924 auto &TLI =
DAG.getTargetLoweringInfo();
2948 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2960 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2964 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2965 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2980 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2996 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2999 DAG.setRoot(BrCond);
3005 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3006 assert(JT.Reg &&
"Should lower JT Header first!");
3007 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3009 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
3010 SDValue BrJumpTable =
DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
3011 Index.getValue(1), Table, Index);
3012 DAG.setRoot(BrJumpTable);
3020 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3021 const SDLoc &dl = *JT.SL;
3027 DAG.getConstant(JTH.
First, dl, VT));
3042 JT.Reg = JumpTableReg;
3050 Sub.getValueType()),
3053 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3054 MVT::Other, CopyTo, CMP,
3055 DAG.getBasicBlock(JT.Default));
3058 if (JT.MBB != NextBlock(SwitchBB))
3059 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3060 DAG.getBasicBlock(JT.MBB));
3062 DAG.setRoot(BrCond);
3065 if (JT.MBB != NextBlock(SwitchBB))
3066 DAG.setRoot(
DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3067 DAG.getBasicBlock(JT.MBB)));
3069 DAG.setRoot(CopyTo);
3092 if (PtrTy != PtrMemTy)
3108 auto &
DL =
DAG.getDataLayout();
3117 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3124 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3137 assert(GuardCheckFn &&
"Guard check function is null");
3148 Entry.IsInReg =
true;
3149 Args.push_back(Entry);
3155 getValue(GuardCheckFn), std::move(Args));
3157 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3158 DAG.setRoot(Result.second);
3170 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3176 Guard =
DAG.getPOISON(PtrMemTy);
3186 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3219 auto &
DL =
DAG.getDataLayout();
3227 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3233 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3248 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3249 Entry.IsInReg =
true;
3250 Args.push_back(Entry);
3256 getValue(GuardCheckFn), std::move(Args));
3262 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3270 Chain =
DAG.getNode(ISD::TRAP,
getCurSDLoc(), MVT::Other, Chain);
3285 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3289 bool UsePtrType =
false;
3313 if (!
B.FallthroughUnreachable)
3314 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3315 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3319 if (!
B.FallthroughUnreachable) {
3327 Root =
DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3328 DAG.getBasicBlock(
B.Default));
3332 if (
MBB != NextBlock(SwitchBB))
3333 Root =
DAG.getNode(ISD::BR, dl, MVT::Other, Root,
DAG.getBasicBlock(
MBB));
3350 if (PopCount == 1) {
3357 }
else if (PopCount == BB.
Range) {
3365 DAG.getConstant(1, dl, VT), ShiftOp);
3369 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3376 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3378 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3386 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3389 if (NextMBB != NextBlock(SwitchBB))
3390 BrAnd =
DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3391 DAG.getBasicBlock(NextMBB));
3396void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3414 const Value *Callee(
I.getCalledOperand());
3417 visitInlineAsm(
I, EHPadBB);
3422 case Intrinsic::donothing:
3424 case Intrinsic::seh_try_begin:
3425 case Intrinsic::seh_scope_begin:
3426 case Intrinsic::seh_try_end:
3427 case Intrinsic::seh_scope_end:
3433 case Intrinsic::experimental_patchpoint_void:
3434 case Intrinsic::experimental_patchpoint:
3435 visitPatchpoint(
I, EHPadBB);
3437 case Intrinsic::experimental_gc_statepoint:
3443 case Intrinsic::wasm_throw: {
3445 std::array<SDValue, 4>
Ops = {
3456 case Intrinsic::wasm_rethrow: {
3457 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3458 std::array<SDValue, 2>
Ops = {
3467 }
else if (
I.hasDeoptState()) {
3488 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3489 BranchProbability EHPadBBProb =
3495 addSuccessorWithProb(InvokeMBB, Return);
3496 for (
auto &UnwindDest : UnwindDests) {
3497 UnwindDest.first->setIsEHPad();
3498 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3504 DAG.getBasicBlock(Return)));
3507void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3508 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3515 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3520 SmallPtrSet<BasicBlock *, 8> Dests;
3521 Dests.
insert(
I.getDefaultDest());
3526 for (BasicBlock *Dest :
I.getIndirectDests()) {
3528 Target->setIsInlineAsmBrIndirectTarget();
3534 Target->setLabelMustBeEmitted();
3536 if (Dests.
insert(Dest).second)
3544 DAG.getBasicBlock(Return)));
3547void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3548 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3551void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3553 "Call to landingpad not in landing pad!");
3557 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3573 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3578 if (
FuncInfo.ExceptionPointerVirtReg) {
3579 Ops[0] =
DAG.getZExtOrTrunc(
3580 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3587 Ops[1] =
DAG.getZExtOrTrunc(
3588 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3595 DAG.getVTList(ValueVTs),
Ops);
3603 if (JTB.first.HeaderBB ==
First)
3604 JTB.first.HeaderBB =
Last;
3617 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3619 bool Inserted =
Done.insert(BB).second;
3624 addSuccessorWithProb(IndirectBrMBB, Succ);
3634 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3635 DAG.getTarget().Options.NoTrapAfterNoreturn))
3641void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3644 Flags.copyFMF(*FPOp);
3652void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3655 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3656 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3659 Flags.setExact(ExactOp->isExact());
3661 Flags.setDisjoint(DisjointOp->isDisjoint());
3663 Flags.copyFMF(*FPOp);
3672void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3676 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3681 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3683 "Unexpected shift type");
3693 if (
const OverflowingBinaryOperator *OFBinOp =
3695 nuw = OFBinOp->hasNoUnsignedWrap();
3696 nsw = OFBinOp->hasNoSignedWrap();
3698 if (
const PossiblyExactOperator *ExactOp =
3700 exact = ExactOp->isExact();
3703 Flags.setExact(exact);
3704 Flags.setNoSignedWrap(nsw);
3705 Flags.setNoUnsignedWrap(nuw);
3711void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3722void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3728 auto &TLI =
DAG.getTargetLoweringInfo();
3741 Flags.setSameSign(
I.hasSameSign());
3742 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3744 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3749void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3756 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3760 Flags.copyFMF(*FPMO);
3761 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3763 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3772 return isa<SelectInst>(V);
3776void SelectionDAGBuilder::visitSelect(
const User &
I) {
3780 unsigned NumValues = ValueVTs.
size();
3781 if (NumValues == 0)
return;
3791 bool IsUnaryAbs =
false;
3792 bool Negate =
false;
3796 Flags.copyFMF(*FPOp);
3798 Flags.setUnpredictable(
3803 EVT VT = ValueVTs[0];
3804 LLVMContext &Ctx = *
DAG.getContext();
3805 auto &TLI =
DAG.getTargetLoweringInfo();
3815 bool UseScalarMinMax = VT.
isVector() &&
3824 switch (SPR.Flavor) {
3830 switch (SPR.NaNBehavior) {
3843 switch (SPR.NaNBehavior) {
3887 for (
unsigned i = 0; i != NumValues; ++i) {
3893 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3896 for (
unsigned i = 0; i != NumValues; ++i) {
3900 Values[i] =
DAG.getNode(
3907 DAG.getVTList(ValueVTs), Values));
3910void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3913 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3917 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3918 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3924void SelectionDAGBuilder::visitZExt(
const User &
I) {
3928 auto &TLI =
DAG.getTargetLoweringInfo();
3933 Flags.setNonNeg(PNI->hasNonNeg());
3938 if (
Flags.hasNonNeg() &&
3947void SelectionDAGBuilder::visitSExt(
const User &
I) {
3951 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3956void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3962 Flags.copyFMF(*TruncInst);
3963 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3966 DAG.getTargetConstant(
3971void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3974 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3978 Flags.copyFMF(*TruncInst);
3982void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3985 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3990void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3993 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3998void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4001 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4005 Flags.setNonNeg(PNI->hasNonNeg());
4010void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4013 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4018void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4021 const auto &TLI =
DAG.getTargetLoweringInfo();
4029void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4033 auto &TLI =
DAG.getTargetLoweringInfo();
4034 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4043void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4047 auto &TLI =
DAG.getTargetLoweringInfo();
4055void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4058 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4063 if (DestVT !=
N.getValueType())
4071 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4077void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4078 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4079 const Value *SV =
I.getOperand(0);
4084 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4086 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4092void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4093 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4100 InVec, InVal, InIdx));
4103void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4104 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4113void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4118 Mask = SVI->getShuffleMask();
4122 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4126 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4131 DAG.getVectorIdxConstant(0,
DL));
4142 unsigned MaskNumElts =
Mask.size();
4144 if (SrcNumElts == MaskNumElts) {
4150 if (SrcNumElts < MaskNumElts) {
4154 if (MaskNumElts % SrcNumElts == 0) {
4158 unsigned NumConcat = MaskNumElts / SrcNumElts;
4159 bool IsConcat =
true;
4160 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4161 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4167 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4168 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4169 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4174 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4181 for (
auto Src : ConcatSrcs) {
4194 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4195 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4211 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4212 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4214 if (Idx >= (
int)SrcNumElts)
4215 Idx -= SrcNumElts - PaddedMaskNumElts;
4223 if (MaskNumElts != PaddedMaskNumElts)
4225 DAG.getVectorIdxConstant(0,
DL));
4231 assert(SrcNumElts > MaskNumElts);
4235 int StartIdx[2] = {-1, -1};
4236 bool CanExtract =
true;
4237 for (
int Idx : Mask) {
4242 if (Idx >= (
int)SrcNumElts) {
4250 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4251 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4252 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4256 StartIdx[Input] = NewStartIdx;
4259 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4265 for (
unsigned Input = 0; Input < 2; ++Input) {
4266 SDValue &Src = Input == 0 ? Src1 : Src2;
4267 if (StartIdx[Input] < 0)
4268 Src =
DAG.getUNDEF(VT);
4271 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4276 SmallVector<int, 8> MappedOps(Mask);
4277 for (
int &Idx : MappedOps) {
4278 if (Idx >= (
int)SrcNumElts)
4279 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4284 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4293 for (
int Idx : Mask) {
4297 Res =
DAG.getUNDEF(EltVT);
4299 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4300 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4303 DAG.getVectorIdxConstant(Idx,
DL));
4313 ArrayRef<unsigned> Indices =
I.getIndices();
4314 const Value *Op0 =
I.getOperand(0);
4316 Type *AggTy =
I.getType();
4323 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4329 unsigned NumAggValues = AggValueVTs.
size();
4330 unsigned NumValValues = ValValueVTs.
size();
4334 if (!NumAggValues) {
4342 for (; i != LinearIndex; ++i)
4343 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4348 for (; i != LinearIndex + NumValValues; ++i)
4349 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4353 for (; i != NumAggValues; ++i)
4354 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4358 DAG.getVTList(AggValueVTs), Values));
4362 ArrayRef<unsigned> Indices =
I.getIndices();
4363 const Value *Op0 =
I.getOperand(0);
4365 Type *ValTy =
I.getType();
4370 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4374 unsigned NumValValues = ValValueVTs.
size();
4377 if (!NumValValues) {
4386 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4387 Values[i - LinearIndex] =
4393 DAG.getVTList(ValValueVTs), Values));
4396void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4397 Value *Op0 =
I.getOperand(0);
4403 auto &TLI =
DAG.getTargetLoweringInfo();
4408 bool IsVectorGEP =
I.getType()->isVectorTy();
4409 ElementCount VectorElementCount =
4415 const Value *Idx = GTI.getOperand();
4416 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4421 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4431 N =
DAG.getMemBasePlusOffset(
4432 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4438 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4440 TypeSize ElementSize =
4441 GTI.getSequentialElementStride(
DAG.getDataLayout());
4446 bool ElementScalable = ElementSize.
isScalable();
4452 C =
C->getSplatValue();
4455 if (CI && CI->isZero())
4457 if (CI && !ElementScalable) {
4458 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4461 if (
N.getValueType().isVector())
4462 OffsVal =
DAG.getConstant(
4465 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4472 Flags.setNoUnsignedWrap(
true);
4475 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4477 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4485 if (
N.getValueType().isVector()) {
4487 VectorElementCount);
4488 IdxN =
DAG.getSplat(VT, dl, IdxN);
4492 N =
DAG.getSplat(VT, dl,
N);
4498 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4500 SDNodeFlags ScaleFlags;
4509 if (ElementScalable) {
4510 EVT VScaleTy =
N.getValueType().getScalarType();
4512 ISD::VSCALE, dl, VScaleTy,
4513 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4514 if (
N.getValueType().isVector())
4515 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4516 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4521 if (ElementMul != 1) {
4522 if (ElementMul.isPowerOf2()) {
4523 unsigned Amt = ElementMul.logBase2();
4526 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4529 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4531 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4541 SDNodeFlags AddFlags;
4545 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4549 if (IsVectorGEP && !
N.getValueType().isVector()) {
4551 N =
DAG.getSplat(VT, dl,
N);
4562 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4567void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4574 Type *Ty =
I.getAllocatedType();
4575 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4576 auto &
DL =
DAG.getDataLayout();
4577 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4578 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4584 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4587 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4588 DAG.getVScale(dl, IntPtr,
4594 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4595 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4601 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4602 if (*Alignment <= StackAlign)
4603 Alignment = std::nullopt;
4605 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4610 DAG.getConstant(StackAlignMask, dl, IntPtr),
4615 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4619 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4621 SDValue DSA =
DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs,
Ops);
4629 return I.getMetadata(LLVMContext::MD_range);
4634 if (std::optional<ConstantRange> CR = CB->getRange())
4638 return std::nullopt;
4643 return CB->getRetNoFPClass();
4647void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4649 return visitAtomicLoad(
I);
4651 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4652 const Value *SV =
I.getOperand(0);
4657 if (Arg->hasSwiftErrorAttr())
4658 return visitLoadFromSwiftError(
I);
4662 if (Alloca->isSwiftError())
4663 return visitLoadFromSwiftError(
I);
4669 Type *Ty =
I.getType();
4673 unsigned NumValues = ValueVTs.
size();
4677 Align Alignment =
I.getAlign();
4678 AAMDNodes AAInfo =
I.getAAMetadata();
4680 bool isVolatile =
I.isVolatile();
4685 bool ConstantMemory =
false;
4692 BatchAA->pointsToConstantMemory(MemoryLocation(
4697 Root =
DAG.getEntryNode();
4698 ConstantMemory =
true;
4702 Root =
DAG.getRoot();
4713 unsigned ChainI = 0;
4714 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4730 MachinePointerInfo PtrInfo =
4732 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4733 : MachinePointerInfo();
4735 SDValue A =
DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4736 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4737 MMOFlags, AAInfo, Ranges);
4738 Chains[ChainI] =
L.getValue(1);
4740 if (MemVTs[i] != ValueVTs[i])
4741 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4746 if (!ConstantMemory) {
4756 DAG.getVTList(ValueVTs), Values));
4759void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4760 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4761 "call visitStoreToSwiftError when backend supports swifterror");
4764 SmallVector<uint64_t, 4>
Offsets;
4765 const Value *SrcV =
I.getOperand(0);
4767 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4768 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4769 "expect a single EVT for swifterror");
4778 SDValue(Src.getNode(), Src.getResNo()));
4779 DAG.setRoot(CopyNode);
4782void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4783 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4784 "call visitLoadFromSwiftError when backend supports swifterror");
4787 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4788 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4789 "Support volatile, non temporal, invariant for load_from_swift_error");
4791 const Value *SV =
I.getOperand(0);
4792 Type *Ty =
I.getType();
4795 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4797 I.getAAMetadata()))) &&
4798 "load_from_swift_error should not be constant memory");
4801 SmallVector<uint64_t, 4>
Offsets;
4803 ValueVTs,
nullptr, &Offsets, 0);
4804 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4805 "expect a single EVT for swifterror");
4815void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4817 return visitAtomicStore(
I);
4819 const Value *SrcV =
I.getOperand(0);
4820 const Value *PtrV =
I.getOperand(1);
4822 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4827 if (Arg->hasSwiftErrorAttr())
4828 return visitStoreToSwiftError(
I);
4832 if (Alloca->isSwiftError())
4833 return visitStoreToSwiftError(
I);
4840 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4841 unsigned NumValues = ValueVTs.
size();
4854 Align Alignment =
I.getAlign();
4855 AAMDNodes AAInfo =
I.getAAMetadata();
4859 unsigned ChainI = 0;
4860 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4870 MachinePointerInfo PtrInfo =
4872 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4873 : MachinePointerInfo();
4877 if (MemVTs[i] != ValueVTs[i])
4878 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4880 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4881 Chains[ChainI] = St;
4887 DAG.setRoot(StoreNode);
4890void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4891 bool IsCompressing) {
4894 Value *Src0Operand =
I.getArgOperand(0);
4895 Value *PtrOperand =
I.getArgOperand(1);
4896 Value *MaskOperand =
I.getArgOperand(2);
4897 Align Alignment =
I.getParamAlign(1).valueOrOne();
4907 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4910 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4911 MachinePointerInfo(PtrOperand), MMOFlags,
4914 const auto &TLI =
DAG.getTargetLoweringInfo();
4917 !IsCompressing &&
TTI->hasConditionalLoadStoreForType(
4918 I.getArgOperand(0)->getType(),
true)
4924 DAG.setRoot(StoreNode);
4954 C =
C->getSplatValue();
4968 if (!
GEP ||
GEP->getParent() != CurBB)
4971 if (
GEP->getNumOperands() != 2)
4974 const Value *BasePtr =
GEP->getPointerOperand();
4975 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4981 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4986 if (ScaleVal != 1 &&
4998void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5002 const Value *Ptr =
I.getArgOperand(1);
5006 Align Alignment =
I.getParamAlign(1).valueOrOne();
5007 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5016 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5026 EVT IdxVT =
Index.getValueType();
5034 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5036 DAG.setRoot(Scatter);
5040void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5043 Value *PtrOperand =
I.getArgOperand(0);
5044 Value *MaskOperand =
I.getArgOperand(1);
5045 Value *Src0Operand =
I.getArgOperand(2);
5046 Align Alignment =
I.getParamAlign(0).valueOrOne();
5054 AAMDNodes AAInfo =
I.getAAMetadata();
5061 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5064 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5067 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5068 MachinePointerInfo(PtrOperand), MMOFlags,
5071 const auto &TLI =
DAG.getTargetLoweringInfo();
5078 TTI->hasConditionalLoadStoreForType(Src0Operand->
getType(),
5083 DAG.getMaskedLoad(VT, sdl, InChain, Ptr,
Offset, Mask, Src0, VT, MMO,
5090void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5094 const Value *Ptr =
I.getArgOperand(0);
5098 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5100 Align Alignment =
I.getParamAlign(0).valueOrOne();
5111 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5123 EVT IdxVT =
Index.getValueType();
5132 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5148 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5150 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5153 MachineFunction &MF =
DAG.getMachineFunction();
5155 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5156 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5159 SDValue L =
DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5160 dl, MemVT, VTs, InChain,
5168 DAG.setRoot(OutChain);
5171void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5174 switch (
I.getOperation()) {
5192 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5195 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5198 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5201 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5204 NT = ISD::ATOMIC_LOAD_USUB_COND;
5207 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5216 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5219 MachineFunction &MF =
DAG.getMachineFunction();
5221 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5222 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5225 DAG.getAtomic(NT, dl, MemVT, InChain,
5232 DAG.setRoot(OutChain);
5235void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5237 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5240 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5242 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5249void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5256 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5267 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5268 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5269 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5279 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5282 DAG.setRoot(OutChain);
5285void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5293 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5303 MachineFunction &MF =
DAG.getMachineFunction();
5305 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5306 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5310 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5314 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5317 DAG.setRoot(OutChain);
5325std::pair<bool, bool>
5326SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5328 bool HasChain = !
F->doesNotAccessMemory();
5330 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5332 return {HasChain, OnlyLoad};
5336 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5338 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5345 Ops.push_back(
DAG.getRoot());
5358 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5359 const Value *Arg =
I.getArgOperand(i);
5360 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5368 assert(CI->getBitWidth() <= 64 &&
5369 "large intrinsic immediates not handled");
5370 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5377 if (std::optional<OperandBundleUse> Bundle =
5379 Value *Token = Bundle->Inputs[0].get();
5381 assert(
Ops.back().getValueType() != MVT::Glue &&
5382 "Did not expect another glue node here.");
5384 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5385 Ops.push_back(ConvControlToken);
5393 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5401 return DAG.getVTList(ValueVTs);
5405SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5428 if (
I.getType()->isVoidTy())
5443void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5445 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5448 TargetLowering::IntrinsicInfo
Info;
5449 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5450 bool IsTgtMemIntrinsic =
5454 I, HasChain, OnlyLoad, IsTgtMemIntrinsic ? &
Info :
nullptr);
5455 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5460 Flags.copyFMF(*FPMO);
5461 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5468 if (IsTgtMemIntrinsic) {
5473 MachinePointerInfo MPI;
5475 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5476 else if (
Info.fallbackAddressSpace)
5477 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5478 EVT MemVT =
Info.memVT;
5480 if (
Size.hasValue() && !
Size.getValue())
5482 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5483 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5484 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5489 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5492 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5508 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32, t2);
5549 SDValue TwoToFractionalPartOfX;
5617 SDValue t13 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5618 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32,
5626 if (
Op.getValueType() == MVT::f32 &&
5641 return DAG.
getNode(ISD::FEXP, dl,
Op.getValueType(),
Op, Flags);
5650 if (
Op.getValueType() == MVT::f32 &&
5740 return DAG.
getNode(ISD::FLOG, dl,
Op.getValueType(),
Op, Flags);
5749 if (
Op.getValueType() == MVT::f32 &&
5833 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5837 return DAG.
getNode(ISD::FLOG2, dl,
Op.getValueType(),
Op, Flags);
5846 if (
Op.getValueType() == MVT::f32 &&
5923 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5927 return DAG.
getNode(ISD::FLOG10, dl,
Op.getValueType(),
Op, Flags);
5934 if (
Op.getValueType() == MVT::f32 &&
5939 return DAG.
getNode(ISD::FEXP2, dl,
Op.getValueType(),
Op, Flags);
5947 bool IsExp10 =
false;
5948 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5952 IsExp10 = LHSC->isExactlyValue(Ten);
5979 unsigned Val = RHSC->getSExtValue();
6008 CurSquare, CurSquare);
6013 if (RHSC->getSExtValue() < 0)
6027 EVT VT =
LHS.getValueType();
6050 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6054 Opcode, VT, ScaleInt);
6089 switch (
N.getOpcode()) {
6093 Op.getValueType().getSizeInBits());
6118bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6125 MachineFunction &MF =
DAG.getMachineFunction();
6126 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6130 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6135 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6142 auto *NewDIExpr = FragExpr;
6149 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6152 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6153 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6157 if (Kind == FuncArgumentDbgValueKind::Value) {
6162 if (!IsInEntryBlock)
6178 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6179 !
DL->getInlinedAt();
6181 if (!IsInPrologue && !VariableIsFunctionInputArg)
6215 if (VariableIsFunctionInputArg) {
6217 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6218 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6219 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6220 return !NodeMap[
V].getNode();
6225 bool IsIndirect =
false;
6226 std::optional<MachineOperand>
Op;
6228 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6229 if (FI != std::numeric_limits<int>::max())
6233 if (!
Op &&
N.getNode()) {
6236 if (ArgRegsAndSizes.
size() == 1)
6237 Reg = ArgRegsAndSizes.
front().first;
6240 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6247 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6251 if (!
Op &&
N.getNode()) {
6255 if (FrameIndexSDNode *FINode =
6265 for (
const auto &RegAndSize : SplitRegs) {
6269 int RegFragmentSizeInBits = RegAndSize.second;
6271 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6274 if (
Offset >= ExprFragmentSizeInBits)
6278 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6279 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6284 Expr,
Offset, RegFragmentSizeInBits);
6285 Offset += RegAndSize.second;
6288 if (!FragmentExpr) {
6289 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6291 DAG.AddDbgValue(SDV,
false);
6294 MachineInstr *NewMI =
6295 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6296 Kind != FuncArgumentDbgValueKind::Value);
6297 FuncInfo.ArgDbgValues.push_back(NewMI);
6304 if (VMI !=
FuncInfo.ValueMap.end()) {
6305 const auto &TLI =
DAG.getTargetLoweringInfo();
6306 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6307 V->getType(), std::nullopt);
6308 if (RFV.occupiesMultipleRegs()) {
6309 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6314 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6315 }
else if (ArgRegsAndSizes.
size() > 1) {
6318 splitMultiRegDbgValue(ArgRegsAndSizes);
6327 "Expected inlined-at fields to agree");
6328 MachineInstr *NewMI =
nullptr;
6331 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6333 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6337 FuncInfo.ArgDbgValues.push_back(NewMI);
6346 unsigned DbgSDNodeOrder) {
6358 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6359 false, dl, DbgSDNodeOrder);
6361 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6362 false, dl, DbgSDNodeOrder);
6367 case Intrinsic::smul_fix:
6369 case Intrinsic::umul_fix:
6371 case Intrinsic::smul_fix_sat:
6373 case Intrinsic::umul_fix_sat:
6375 case Intrinsic::sdiv_fix:
6377 case Intrinsic::udiv_fix:
6379 case Intrinsic::sdiv_fix_sat:
6381 case Intrinsic::udiv_fix_sat:
6394 "expected call_preallocated_setup Value");
6395 for (
const auto *U : PreallocatedSetup->
users()) {
6397 const Function *Fn = UseCall->getCalledFunction();
6398 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6408bool SelectionDAGBuilder::visitEntryValueDbgValue(
6418 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6419 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6421 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6422 "couldn't find an associated register for the Argument\n");
6425 Register ArgVReg = ArgIt->getSecond();
6427 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6428 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6429 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6430 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6431 DAG.AddDbgValue(SDV,
false );
6434 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6435 "couldn't find a physical register\n");
6440void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6443 switch (Intrinsic) {
6444 case Intrinsic::experimental_convergence_anchor:
6445 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6447 case Intrinsic::experimental_convergence_entry:
6448 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6450 case Intrinsic::experimental_convergence_loop: {
6452 auto *Token = Bundle->Inputs[0].get();
6453 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6460void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6461 unsigned IntrinsicID) {
6464 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6465 "Tried to lower unsupported histogram type");
6471 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6472 DataLayout TargetDL =
DAG.getDataLayout();
6474 Align Alignment =
DAG.getEVTAlign(VT);
6487 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6488 MachinePointerInfo(AS),
6499 EVT IdxVT =
Index.getValueType();
6506 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6509 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6513 DAG.setRoot(Histogram);
6516void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6518 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6519 "Tried lowering invalid vector extract last");
6521 const DataLayout &Layout =
DAG.getDataLayout();
6525 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6529 SDValue Idx =
DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6535 EVT BoolVT =
Mask.getValueType().getScalarType();
6536 SDValue AnyActive =
DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6537 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6544void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6546 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6553 Flags.copyFMF(*FPOp);
6555 switch (Intrinsic) {
6558 visitTargetIntrinsic(
I, Intrinsic);
6560 case Intrinsic::vscale: {
6565 case Intrinsic::vastart: visitVAStart(
I);
return;
6566 case Intrinsic::vaend: visitVAEnd(
I);
return;
6567 case Intrinsic::vacopy: visitVACopy(
I);
return;
6568 case Intrinsic::returnaddress:
6573 case Intrinsic::addressofreturnaddress:
6578 case Intrinsic::sponentry:
6583 case Intrinsic::frameaddress:
6588 case Intrinsic::read_volatile_register:
6589 case Intrinsic::read_register: {
6590 Value *
Reg =
I.getArgOperand(0);
6596 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6601 case Intrinsic::write_register: {
6602 Value *
Reg =
I.getArgOperand(0);
6603 Value *RegValue =
I.getArgOperand(1);
6611 case Intrinsic::memcpy:
6612 case Intrinsic::memcpy_inline: {
6618 "memcpy_inline needs constant size");
6620 Align DstAlign = MCI.getDestAlign().valueOrOne();
6621 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6622 Align Alignment = std::min(DstAlign, SrcAlign);
6623 bool isVol = MCI.isVolatile();
6627 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6628 MCI.isForceInlined(), &
I, std::nullopt,
6629 MachinePointerInfo(
I.getArgOperand(0)),
6630 MachinePointerInfo(
I.getArgOperand(1)),
6632 updateDAGForMaybeTailCall(MC);
6635 case Intrinsic::memset:
6636 case Intrinsic::memset_inline: {
6642 "memset_inline needs constant size");
6644 Align DstAlign = MSII.getDestAlign().valueOrOne();
6645 bool isVol = MSII.isVolatile();
6648 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6649 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6650 updateDAGForMaybeTailCall(MC);
6653 case Intrinsic::memmove: {
6659 Align DstAlign = MMI.getDestAlign().valueOrOne();
6660 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6661 Align Alignment = std::min(DstAlign, SrcAlign);
6662 bool isVol = MMI.isVolatile();
6666 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6668 MachinePointerInfo(
I.getArgOperand(0)),
6669 MachinePointerInfo(
I.getArgOperand(1)),
6671 updateDAGForMaybeTailCall(MM);
6674 case Intrinsic::memcpy_element_unordered_atomic: {
6680 Type *LengthTy =
MI.getLength()->getType();
6681 unsigned ElemSz =
MI.getElementSizeInBytes();
6685 isTC, MachinePointerInfo(
MI.getRawDest()),
6686 MachinePointerInfo(
MI.getRawSource()));
6687 updateDAGForMaybeTailCall(MC);
6690 case Intrinsic::memmove_element_unordered_atomic: {
6696 Type *LengthTy =
MI.getLength()->getType();
6697 unsigned ElemSz =
MI.getElementSizeInBytes();
6701 isTC, MachinePointerInfo(
MI.getRawDest()),
6702 MachinePointerInfo(
MI.getRawSource()));
6703 updateDAGForMaybeTailCall(MC);
6706 case Intrinsic::memset_element_unordered_atomic: {
6712 Type *LengthTy =
MI.getLength()->getType();
6713 unsigned ElemSz =
MI.getElementSizeInBytes();
6717 isTC, MachinePointerInfo(
MI.getRawDest()));
6718 updateDAGForMaybeTailCall(MC);
6721 case Intrinsic::call_preallocated_setup: {
6723 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6724 SDValue Res =
DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6730 case Intrinsic::call_preallocated_arg: {
6732 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6739 ISD::PREALLOCATED_ARG, sdl,
6746 case Intrinsic::eh_typeid_for: {
6749 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6750 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6755 case Intrinsic::eh_return_i32:
6756 case Intrinsic::eh_return_i64:
6757 DAG.getMachineFunction().setCallsEHReturn(
true);
6764 case Intrinsic::eh_unwind_init:
6765 DAG.getMachineFunction().setCallsUnwindInit(
true);
6767 case Intrinsic::eh_dwarf_cfa:
6772 case Intrinsic::eh_sjlj_callsite: {
6774 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6779 case Intrinsic::eh_sjlj_functioncontext: {
6781 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6784 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6788 case Intrinsic::eh_sjlj_setjmp: {
6793 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6795 DAG.setRoot(
Op.getValue(1));
6798 case Intrinsic::eh_sjlj_longjmp:
6802 case Intrinsic::eh_sjlj_setup_dispatch:
6806 case Intrinsic::masked_gather:
6807 visitMaskedGather(
I);
6809 case Intrinsic::masked_load:
6812 case Intrinsic::masked_scatter:
6813 visitMaskedScatter(
I);
6815 case Intrinsic::masked_store:
6816 visitMaskedStore(
I);
6818 case Intrinsic::masked_expandload:
6819 visitMaskedLoad(
I,
true );
6821 case Intrinsic::masked_compressstore:
6822 visitMaskedStore(
I,
true );
6824 case Intrinsic::powi:
6828 case Intrinsic::log:
6831 case Intrinsic::log2:
6835 case Intrinsic::log10:
6839 case Intrinsic::exp:
6842 case Intrinsic::exp2:
6846 case Intrinsic::pow:
6850 case Intrinsic::sqrt:
6851 case Intrinsic::fabs:
6852 case Intrinsic::sin:
6853 case Intrinsic::cos:
6854 case Intrinsic::tan:
6855 case Intrinsic::asin:
6856 case Intrinsic::acos:
6857 case Intrinsic::atan:
6858 case Intrinsic::sinh:
6859 case Intrinsic::cosh:
6860 case Intrinsic::tanh:
6861 case Intrinsic::exp10:
6862 case Intrinsic::floor:
6863 case Intrinsic::ceil:
6864 case Intrinsic::trunc:
6865 case Intrinsic::rint:
6866 case Intrinsic::nearbyint:
6867 case Intrinsic::round:
6868 case Intrinsic::roundeven:
6869 case Intrinsic::canonicalize: {
6872 switch (Intrinsic) {
6874 case Intrinsic::sqrt: Opcode = ISD::FSQRT;
break;
6875 case Intrinsic::fabs: Opcode = ISD::FABS;
break;
6876 case Intrinsic::sin: Opcode = ISD::FSIN;
break;
6877 case Intrinsic::cos: Opcode = ISD::FCOS;
break;
6878 case Intrinsic::tan: Opcode = ISD::FTAN;
break;
6879 case Intrinsic::asin: Opcode = ISD::FASIN;
break;
6880 case Intrinsic::acos: Opcode = ISD::FACOS;
break;
6881 case Intrinsic::atan: Opcode = ISD::FATAN;
break;
6882 case Intrinsic::sinh: Opcode = ISD::FSINH;
break;
6883 case Intrinsic::cosh: Opcode = ISD::FCOSH;
break;
6884 case Intrinsic::tanh: Opcode = ISD::FTANH;
break;
6885 case Intrinsic::exp10: Opcode = ISD::FEXP10;
break;
6886 case Intrinsic::floor: Opcode = ISD::FFLOOR;
break;
6887 case Intrinsic::ceil: Opcode = ISD::FCEIL;
break;
6888 case Intrinsic::trunc: Opcode = ISD::FTRUNC;
break;
6889 case Intrinsic::rint: Opcode = ISD::FRINT;
break;
6890 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT;
break;
6891 case Intrinsic::round: Opcode = ISD::FROUND;
break;
6892 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN;
break;
6898 getValue(
I.getArgOperand(0)).getValueType(),
6902 case Intrinsic::atan2:
6904 getValue(
I.getArgOperand(0)).getValueType(),
6908 case Intrinsic::lround:
6909 case Intrinsic::llround:
6910 case Intrinsic::lrint:
6911 case Intrinsic::llrint: {
6914 switch (Intrinsic) {
6916 case Intrinsic::lround: Opcode = ISD::LROUND;
break;
6917 case Intrinsic::llround: Opcode = ISD::LLROUND;
break;
6918 case Intrinsic::lrint: Opcode = ISD::LRINT;
break;
6919 case Intrinsic::llrint: Opcode = ISD::LLRINT;
break;
6928 case Intrinsic::minnum:
6930 getValue(
I.getArgOperand(0)).getValueType(),
6934 case Intrinsic::maxnum:
6936 getValue(
I.getArgOperand(0)).getValueType(),
6940 case Intrinsic::minimum:
6942 getValue(
I.getArgOperand(0)).getValueType(),
6946 case Intrinsic::maximum:
6948 getValue(
I.getArgOperand(0)).getValueType(),
6952 case Intrinsic::minimumnum:
6954 getValue(
I.getArgOperand(0)).getValueType(),
6958 case Intrinsic::maximumnum:
6960 getValue(
I.getArgOperand(0)).getValueType(),
6964 case Intrinsic::copysign:
6966 getValue(
I.getArgOperand(0)).getValueType(),
6970 case Intrinsic::ldexp:
6972 getValue(
I.getArgOperand(0)).getValueType(),
6976 case Intrinsic::modf:
6977 case Intrinsic::sincos:
6978 case Intrinsic::sincospi:
6979 case Intrinsic::frexp: {
6981 switch (Intrinsic) {
6984 case Intrinsic::sincos:
6985 Opcode = ISD::FSINCOS;
6987 case Intrinsic::sincospi:
6988 Opcode = ISD::FSINCOSPI;
6990 case Intrinsic::modf:
6991 Opcode = ISD::FMODF;
6993 case Intrinsic::frexp:
6994 Opcode = ISD::FFREXP;
6999 SDVTList VTs =
DAG.getVTList(ValueVTs);
7001 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7004 case Intrinsic::arithmetic_fence: {
7006 getValue(
I.getArgOperand(0)).getValueType(),
7010 case Intrinsic::fma:
7016#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7017 case Intrinsic::INTRINSIC:
7018#include "llvm/IR/ConstrainedOps.def"
7021#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7022#include "llvm/IR/VPIntrinsics.def"
7025 case Intrinsic::fptrunc_round: {
7029 std::optional<RoundingMode> RoundMode =
7037 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7042 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7047 case Intrinsic::fmuladd: {
7052 getValue(
I.getArgOperand(0)).getValueType(),
7059 getValue(
I.getArgOperand(0)).getValueType(),
7075 case Intrinsic::convert_to_fp16:
7079 DAG.getTargetConstant(0, sdl,
7082 case Intrinsic::convert_from_fp16:
7085 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7088 case Intrinsic::fptosi_sat: {
7095 case Intrinsic::fptoui_sat: {
7102 case Intrinsic::set_rounding:
7103 Res =
DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7108 case Intrinsic::is_fpclass: {
7109 const DataLayout DLayout =
DAG.getDataLayout();
7111 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7114 MachineFunction &MF =
DAG.getMachineFunction();
7118 Flags.setNoFPExcept(
7119 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7135 case Intrinsic::get_fpenv: {
7136 const DataLayout DLayout =
DAG.getDataLayout();
7138 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7144 ISD::GET_FPENV, sdl,
7153 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7156 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7157 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7163 case Intrinsic::set_fpenv: {
7164 const DataLayout DLayout =
DAG.getDataLayout();
7167 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7172 Chain =
DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7180 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7182 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7185 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7190 case Intrinsic::reset_fpenv:
7191 DAG.setRoot(
DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other,
getRoot()));
7193 case Intrinsic::get_fpmode:
7195 ISD::GET_FPMODE, sdl,
7202 case Intrinsic::set_fpmode:
7203 Res =
DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {
DAG.getRoot()},
7207 case Intrinsic::reset_fpmode: {
7208 Res =
DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other,
getRoot());
7212 case Intrinsic::pcmarker: {
7214 DAG.setRoot(
DAG.getNode(ISD::PCMARKER, sdl, MVT::Other,
getRoot(), Tmp));
7217 case Intrinsic::readcyclecounter: {
7219 Res =
DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7220 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7225 case Intrinsic::readsteadycounter: {
7227 Res =
DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7228 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7233 case Intrinsic::bitreverse:
7235 getValue(
I.getArgOperand(0)).getValueType(),
7238 case Intrinsic::bswap:
7240 getValue(
I.getArgOperand(0)).getValueType(),
7243 case Intrinsic::cttz: {
7251 case Intrinsic::ctlz: {
7259 case Intrinsic::ctpop: {
7265 case Intrinsic::fshl:
7266 case Intrinsic::fshr: {
7267 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7271 EVT VT =
X.getValueType();
7282 case Intrinsic::sadd_sat: {
7288 case Intrinsic::uadd_sat: {
7294 case Intrinsic::ssub_sat: {
7300 case Intrinsic::usub_sat: {
7306 case Intrinsic::sshl_sat: {
7312 case Intrinsic::ushl_sat: {
7318 case Intrinsic::smul_fix:
7319 case Intrinsic::umul_fix:
7320 case Intrinsic::smul_fix_sat:
7321 case Intrinsic::umul_fix_sat: {
7329 case Intrinsic::sdiv_fix:
7330 case Intrinsic::udiv_fix:
7331 case Intrinsic::sdiv_fix_sat:
7332 case Intrinsic::udiv_fix_sat: {
7337 Op1, Op2, Op3,
DAG, TLI));
7340 case Intrinsic::smax: {
7346 case Intrinsic::smin: {
7352 case Intrinsic::umax: {
7358 case Intrinsic::umin: {
7364 case Intrinsic::abs: {
7370 case Intrinsic::scmp: {
7377 case Intrinsic::ucmp: {
7384 case Intrinsic::stacksave: {
7387 Res =
DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7392 case Intrinsic::stackrestore:
7394 DAG.setRoot(
DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other,
getRoot(), Res));
7396 case Intrinsic::get_dynamic_area_offset: {
7399 Res =
DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl,
DAG.getVTList(ResTy),
7405 case Intrinsic::stackguard: {
7406 MachineFunction &MF =
DAG.getMachineFunction();
7412 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7416 LLVMContext &Ctx = *
DAG.getContext();
7417 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7424 MachinePointerInfo(
Global, 0), Align,
7433 case Intrinsic::stackprotector: {
7435 MachineFunction &MF =
DAG.getMachineFunction();
7455 Chain, sdl, Src, FIN,
7462 case Intrinsic::objectsize:
7465 case Intrinsic::is_constant:
7468 case Intrinsic::annotation:
7469 case Intrinsic::ptr_annotation:
7470 case Intrinsic::launder_invariant_group:
7471 case Intrinsic::strip_invariant_group:
7476 case Intrinsic::type_test:
7477 case Intrinsic::public_type_test:
7481 case Intrinsic::assume:
7482 case Intrinsic::experimental_noalias_scope_decl:
7483 case Intrinsic::var_annotation:
7484 case Intrinsic::sideeffect:
7489 case Intrinsic::codeview_annotation: {
7491 MachineFunction &MF =
DAG.getMachineFunction();
7495 Res =
DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl,
getRoot(), Label);
7500 case Intrinsic::init_trampoline: {
7508 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7511 Res =
DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other,
Ops);
7516 case Intrinsic::adjust_trampoline:
7521 case Intrinsic::gcroot: {
7522 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7523 "only valid in functions with gc specified, enforced by Verifier");
7525 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7532 case Intrinsic::gcread:
7533 case Intrinsic::gcwrite:
7535 case Intrinsic::get_rounding:
7541 case Intrinsic::expect:
7542 case Intrinsic::expect_with_probability:
7548 case Intrinsic::ubsantrap:
7549 case Intrinsic::debugtrap:
7550 case Intrinsic::trap: {
7551 StringRef TrapFuncName =
7552 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7553 if (TrapFuncName.
empty()) {
7554 switch (Intrinsic) {
7555 case Intrinsic::trap:
7556 DAG.setRoot(
DAG.getNode(ISD::TRAP, sdl, MVT::Other,
getRoot()));
7558 case Intrinsic::debugtrap:
7559 DAG.setRoot(
DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other,
getRoot()));
7561 case Intrinsic::ubsantrap:
7563 ISD::UBSANTRAP, sdl, MVT::Other,
getRoot(),
7564 DAG.getTargetConstant(
7570 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7571 I.hasFnAttr(Attribute::NoMerge));
7575 if (Intrinsic == Intrinsic::ubsantrap) {
7576 Value *Arg =
I.getArgOperand(0);
7580 TargetLowering::CallLoweringInfo CLI(
DAG);
7581 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7583 DAG.getExternalSymbol(TrapFuncName.
data(),
7586 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7592 case Intrinsic::allow_runtime_check:
7593 case Intrinsic::allow_ubsan_check:
7597 case Intrinsic::uadd_with_overflow:
7598 case Intrinsic::sadd_with_overflow:
7599 case Intrinsic::usub_with_overflow:
7600 case Intrinsic::ssub_with_overflow:
7601 case Intrinsic::umul_with_overflow:
7602 case Intrinsic::smul_with_overflow: {
7604 switch (Intrinsic) {
7606 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7607 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7608 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7609 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7610 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7611 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7617 EVT OverflowVT = MVT::i1;
7622 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7626 case Intrinsic::prefetch: {
7639 ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other),
Ops,
7641 std::nullopt, Flags);
7647 DAG.setRoot(Result);
7650 case Intrinsic::lifetime_start:
7651 case Intrinsic::lifetime_end: {
7652 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7658 if (!LifetimeObject)
7663 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7664 if (SI ==
FuncInfo.StaticAllocaMap.end())
7668 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7672 case Intrinsic::pseudoprobe: {
7680 case Intrinsic::invariant_start:
7685 case Intrinsic::invariant_end:
7688 case Intrinsic::clear_cache: {
7693 {InputChain, StartVal, EndVal});
7698 case Intrinsic::donothing:
7699 case Intrinsic::seh_try_begin:
7700 case Intrinsic::seh_scope_begin:
7701 case Intrinsic::seh_try_end:
7702 case Intrinsic::seh_scope_end:
7705 case Intrinsic::experimental_stackmap:
7708 case Intrinsic::experimental_patchpoint_void:
7709 case Intrinsic::experimental_patchpoint:
7712 case Intrinsic::experimental_gc_statepoint:
7715 case Intrinsic::experimental_gc_result:
7718 case Intrinsic::experimental_gc_relocate:
7721 case Intrinsic::instrprof_cover:
7723 case Intrinsic::instrprof_increment:
7725 case Intrinsic::instrprof_timestamp:
7727 case Intrinsic::instrprof_value_profile:
7729 case Intrinsic::instrprof_mcdc_parameters:
7731 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7733 case Intrinsic::localescape: {
7734 MachineFunction &MF =
DAG.getMachineFunction();
7735 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7739 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7745 "can only escape static allocas");
7750 TII->get(TargetOpcode::LOCAL_ESCAPE))
7758 case Intrinsic::localrecover: {
7760 MachineFunction &MF =
DAG.getMachineFunction();
7766 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7770 Value *
FP =
I.getArgOperand(1);
7776 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7781 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7787 case Intrinsic::fake_use: {
7788 Value *
V =
I.getArgOperand(0);
7793 auto FakeUseValue = [&]() ->
SDValue {
7807 if (!FakeUseValue || FakeUseValue.isUndef())
7810 Ops[1] = FakeUseValue;
7815 DAG.setRoot(
DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other,
Ops));
7819 case Intrinsic::reloc_none: {
7824 DAG.getTargetExternalSymbol(
7826 DAG.setRoot(
DAG.getNode(ISD::RELOC_NONE, sdl, MVT::Other,
Ops));
7830 case Intrinsic::eh_exceptionpointer:
7831 case Intrinsic::eh_exceptioncode: {
7837 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7838 if (Intrinsic == Intrinsic::eh_exceptioncode)
7839 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7843 case Intrinsic::xray_customevent: {
7846 const auto &Triple =
DAG.getTarget().getTargetTriple();
7855 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7857 Ops.push_back(LogEntryVal);
7858 Ops.push_back(StrSizeVal);
7859 Ops.push_back(Chain);
7865 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7868 DAG.setRoot(patchableNode);
7872 case Intrinsic::xray_typedevent: {
7875 const auto &Triple =
DAG.getTarget().getTargetTriple();
7887 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7889 Ops.push_back(LogTypeId);
7890 Ops.push_back(LogEntryVal);
7891 Ops.push_back(StrSizeVal);
7892 Ops.push_back(Chain);
7898 MachineSDNode *MN =
DAG.getMachineNode(
7899 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7901 DAG.setRoot(patchableNode);
7905 case Intrinsic::experimental_deoptimize:
7908 case Intrinsic::stepvector:
7911 case Intrinsic::vector_reduce_fadd:
7912 case Intrinsic::vector_reduce_fmul:
7913 case Intrinsic::vector_reduce_add:
7914 case Intrinsic::vector_reduce_mul:
7915 case Intrinsic::vector_reduce_and:
7916 case Intrinsic::vector_reduce_or:
7917 case Intrinsic::vector_reduce_xor:
7918 case Intrinsic::vector_reduce_smax:
7919 case Intrinsic::vector_reduce_smin:
7920 case Intrinsic::vector_reduce_umax:
7921 case Intrinsic::vector_reduce_umin:
7922 case Intrinsic::vector_reduce_fmax:
7923 case Intrinsic::vector_reduce_fmin:
7924 case Intrinsic::vector_reduce_fmaximum:
7925 case Intrinsic::vector_reduce_fminimum:
7926 visitVectorReduce(
I, Intrinsic);
7929 case Intrinsic::icall_branch_funnel: {
7935 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7938 "llvm.icall.branch.funnel operand must be a GlobalValue");
7939 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7941 struct BranchFunnelTarget {
7947 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7950 if (ElemBase !=
Base)
7952 "to the same GlobalValue");
7958 "llvm.icall.branch.funnel operand must be a GlobalValue");
7964 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
7965 return T1.Offset < T2.Offset;
7968 for (
auto &
T : Targets) {
7969 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
7970 Ops.push_back(
T.Target);
7973 Ops.push_back(
DAG.getRoot());
7974 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7983 case Intrinsic::wasm_landingpad_index:
7989 case Intrinsic::aarch64_settag:
7990 case Intrinsic::aarch64_settag_zero: {
7991 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
7992 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7995 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8001 case Intrinsic::amdgcn_cs_chain: {
8006 Type *RetTy =
I.getType();
8016 for (
unsigned Idx : {2, 3, 1}) {
8017 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8019 Arg.setAttributes(&
I, Idx);
8020 Args.push_back(Arg);
8023 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8024 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8025 Args[2].IsInReg =
true;
8028 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8029 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8031 Arg.setAttributes(&
I, Idx);
8032 Args.push_back(Arg);
8035 TargetLowering::CallLoweringInfo CLI(
DAG);
8038 .setCallee(CC, RetTy, Callee, std::move(Args))
8041 .setConvergent(
I.isConvergent());
8043 std::pair<SDValue, SDValue>
Result =
8047 "Should've lowered as tail call");
8052 case Intrinsic::amdgcn_call_whole_wave: {
8054 bool isTailCall =
I.isTailCall();
8057 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8058 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8059 I.getArgOperand(Idx)->getType());
8060 Arg.setAttributes(&
I, Idx);
8067 Args.push_back(Arg);
8072 auto *Token = Bundle->Inputs[0].get();
8073 ConvControlToken =
getValue(Token);
8076 TargetLowering::CallLoweringInfo CLI(
DAG);
8080 getValue(
I.getArgOperand(0)), std::move(Args))
8084 .setConvergent(
I.isConvergent())
8085 .setConvergenceControlToken(ConvControlToken);
8088 std::pair<SDValue, SDValue>
Result =
8091 if (
Result.first.getNode())
8095 case Intrinsic::ptrmask: {
8111 auto HighOnes =
DAG.getNode(
8112 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8113 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8116 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8117 }
else if (
Mask.getValueType() != PtrVT)
8118 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8124 case Intrinsic::threadlocal_address: {
8128 case Intrinsic::get_active_lane_mask: {
8132 EVT ElementVT =
Index.getValueType();
8135 setValue(&
I,
DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8143 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8144 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8145 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8148 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8153 case Intrinsic::experimental_get_vector_length: {
8155 "Expected positive VF");
8160 EVT CountVT =
Count.getValueType();
8163 visitTargetIntrinsic(
I, Intrinsic);
8172 if (CountVT.
bitsLT(VT)) {
8177 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8187 case Intrinsic::vector_partial_reduce_add: {
8195 case Intrinsic::vector_partial_reduce_fadd: {
8199 ISD::PARTIAL_REDUCE_FMLA, sdl, Acc.
getValueType(), Acc,
8203 case Intrinsic::experimental_cttz_elts: {
8206 EVT OpVT =
Op.getValueType();
8209 visitTargetIntrinsic(
I, Intrinsic);
8225 ConstantRange VScaleRange(1,
true);
8254 case Intrinsic::vector_insert: {
8262 if (
Index.getValueType() != VectorIdxTy)
8263 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8270 case Intrinsic::vector_extract: {
8278 if (
Index.getValueType() != VectorIdxTy)
8279 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8285 case Intrinsic::experimental_vector_match: {
8291 EVT ResVT =
Mask.getValueType();
8297 visitTargetIntrinsic(
I, Intrinsic);
8303 for (
unsigned i = 0; i < SearchSize; ++i) {
8306 DAG.getVectorIdxConstant(i, sdl));
8315 case Intrinsic::vector_reverse:
8316 visitVectorReverse(
I);
8318 case Intrinsic::vector_splice:
8319 visitVectorSplice(
I);
8321 case Intrinsic::callbr_landingpad:
8322 visitCallBrLandingPad(
I);
8324 case Intrinsic::vector_interleave2:
8325 visitVectorInterleave(
I, 2);
8327 case Intrinsic::vector_interleave3:
8328 visitVectorInterleave(
I, 3);
8330 case Intrinsic::vector_interleave4:
8331 visitVectorInterleave(
I, 4);
8333 case Intrinsic::vector_interleave5:
8334 visitVectorInterleave(
I, 5);
8336 case Intrinsic::vector_interleave6:
8337 visitVectorInterleave(
I, 6);
8339 case Intrinsic::vector_interleave7:
8340 visitVectorInterleave(
I, 7);
8342 case Intrinsic::vector_interleave8:
8343 visitVectorInterleave(
I, 8);
8345 case Intrinsic::vector_deinterleave2:
8346 visitVectorDeinterleave(
I, 2);
8348 case Intrinsic::vector_deinterleave3:
8349 visitVectorDeinterleave(
I, 3);
8351 case Intrinsic::vector_deinterleave4:
8352 visitVectorDeinterleave(
I, 4);
8354 case Intrinsic::vector_deinterleave5:
8355 visitVectorDeinterleave(
I, 5);
8357 case Intrinsic::vector_deinterleave6:
8358 visitVectorDeinterleave(
I, 6);
8360 case Intrinsic::vector_deinterleave7:
8361 visitVectorDeinterleave(
I, 7);
8363 case Intrinsic::vector_deinterleave8:
8364 visitVectorDeinterleave(
I, 8);
8366 case Intrinsic::experimental_vector_compress:
8368 getValue(
I.getArgOperand(0)).getValueType(),
8373 case Intrinsic::experimental_convergence_anchor:
8374 case Intrinsic::experimental_convergence_entry:
8375 case Intrinsic::experimental_convergence_loop:
8376 visitConvergenceControl(
I, Intrinsic);
8378 case Intrinsic::experimental_vector_histogram_add: {
8379 visitVectorHistogram(
I, Intrinsic);
8382 case Intrinsic::experimental_vector_extract_last_active: {
8383 visitVectorExtractLastActive(
I, Intrinsic);
8386 case Intrinsic::loop_dependence_war_mask:
8392 case Intrinsic::loop_dependence_raw_mask:
8401void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8417 PendingConstrainedFP.push_back(OutChain);
8420 PendingConstrainedFPStrict.push_back(OutChain);
8425void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8439 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8441 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8445 Flags.setNoFPExcept(
true);
8448 Flags.copyFMF(*FPOp);
8453#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8454 case Intrinsic::INTRINSIC: \
8455 Opcode = ISD::STRICT_##DAGN; \
8457#include "llvm/IR/ConstrainedOps.def"
8458 case Intrinsic::experimental_constrained_fmuladd: {
8465 pushFPOpOutChain(
Mul, EB);
8488 if (TM.Options.NoNaNsFPMath)
8496 pushFPOpOutChain(Result, EB);
8503 std::optional<unsigned> ResOPC;
8505 case Intrinsic::vp_ctlz: {
8507 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8510 case Intrinsic::vp_cttz: {
8512 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8515 case Intrinsic::vp_cttz_elts: {
8517 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8520#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8521 case Intrinsic::VPID: \
8522 ResOPC = ISD::VPSD; \
8524#include "llvm/IR/VPIntrinsics.def"
8529 "Inconsistency: no SDNode available for this VPIntrinsic!");
8531 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8532 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8534 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8535 : ISD::VP_REDUCE_FMUL;
8541void SelectionDAGBuilder::visitVPLoad(
8553 Alignment =
DAG.getEVTAlign(VT);
8556 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8557 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8560 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8561 MachinePointerInfo(PtrOperand), MMOFlags,
8563 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8570void SelectionDAGBuilder::visitVPLoadFF(
8573 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8583 Alignment =
DAG.getEVTAlign(VT);
8586 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8587 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8590 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8595 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8598void SelectionDAGBuilder::visitVPGather(
8602 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8614 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8616 *Alignment, AAInfo, Ranges);
8626 EVT IdxVT =
Index.getValueType();
8632 LD =
DAG.getGatherVP(
8633 DAG.getVTList(VT, MVT::Other), VT,
DL,
8634 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8640void SelectionDAGBuilder::visitVPStore(
8644 EVT VT = OpValues[0].getValueType();
8649 Alignment =
DAG.getEVTAlign(VT);
8652 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8655 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8656 MachinePointerInfo(PtrOperand), MMOFlags,
8665void SelectionDAGBuilder::visitVPScatter(
8668 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8670 EVT VT = OpValues[0].getValueType();
8680 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8682 *Alignment, AAInfo);
8692 EVT IdxVT =
Index.getValueType();
8698 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8699 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8700 OpValues[2], OpValues[3]},
8706void SelectionDAGBuilder::visitVPStridedLoad(
8718 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8720 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8723 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8725 *Alignment, AAInfo, Ranges);
8727 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8728 OpValues[2], OpValues[3], MMO,
8736void SelectionDAGBuilder::visitVPStridedStore(
8740 EVT VT = OpValues[0].getValueType();
8746 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8749 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8751 *Alignment, AAInfo);
8755 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8763void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8764 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8775 if (TM.Options.NoNaNsFPMath)
8788 "Unexpected target EVL type");
8791 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8794 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8797void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8805 return visitVPCmp(*CmpI);
8808 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8810 SDVTList VTs =
DAG.getVTList(ValueVTs);
8816 "Unexpected target EVL type");
8820 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8822 if (
I == EVLParamPos)
8829 SDNodeFlags SDFlags;
8837 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8839 case ISD::VP_LOAD_FF:
8840 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8842 case ISD::VP_GATHER:
8843 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8845 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8846 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8849 visitVPStore(VPIntrin, OpValues);
8851 case ISD::VP_SCATTER:
8852 visitVPScatter(VPIntrin, OpValues);
8854 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8855 visitVPStridedStore(VPIntrin, OpValues);
8857 case ISD::VP_FMULADD: {
8858 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8859 SDNodeFlags SDFlags;
8864 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8867 ISD::VP_FMUL,
DL, VTs,
8868 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8870 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8871 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8876 case ISD::VP_IS_FPCLASS: {
8877 const DataLayout DLayout =
DAG.getDataLayout();
8879 auto Constant = OpValues[1]->getAsZExtVal();
8882 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8886 case ISD::VP_INTTOPTR: {
8897 case ISD::VP_PTRTOINT: {
8899 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8912 case ISD::VP_CTLZ_ZERO_UNDEF:
8914 case ISD::VP_CTTZ_ZERO_UNDEF:
8915 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8916 case ISD::VP_CTTZ_ELTS: {
8918 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8928 MachineFunction &MF =
DAG.getMachineFunction();
8936 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8937 if (CallSiteIndex) {
8951 assert(BeginLabel &&
"BeginLabel should've been set");
8953 MachineFunction &MF =
DAG.getMachineFunction();
8965 assert(
II &&
"II should've been set");
8976std::pair<SDValue, SDValue>
8990 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8993 "Non-null chain expected with non-tail call!");
8994 assert((Result.second.getNode() || !Result.first.getNode()) &&
8995 "Null value expected with tail call!");
8997 if (!Result.second.getNode()) {
9004 PendingExports.clear();
9006 DAG.setRoot(Result.second);
9024 if (!isMustTailCall &&
9025 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9031 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9032 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9041 bool isTailCall,
bool isMustTailCall,
9044 auto &
DL =
DAG.getDataLayout();
9051 const Value *SwiftErrorVal =
nullptr;
9058 const Value *V = *
I;
9061 if (V->getType()->isEmptyTy())
9066 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9078 Args.push_back(Entry);
9089 Value *V = Bundle->Inputs[0];
9091 Entry.IsCFGuardTarget =
true;
9092 Args.push_back(Entry);
9105 "Target doesn't support calls with kcfi operand bundles.");
9113 auto *Token = Bundle->Inputs[0].get();
9114 ConvControlToken =
getValue(Token);
9120 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9132 "This target doesn't support calls with ptrauth operand bundles.");
9136 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9138 if (Result.first.getNode()) {
9153 DAG.setRoot(CopyNode);
9169 LoadTy, Builder.DAG.getDataLayout()))
9170 return Builder.getValue(LoadCst);
9176 bool ConstantMemory =
false;
9179 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9180 Root = Builder.DAG.getEntryNode();
9181 ConstantMemory =
true;
9184 Root = Builder.DAG.getRoot();
9189 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9192 if (!ConstantMemory)
9193 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9199void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9202 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9213bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9214 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9215 const Value *
Size =
I.getArgOperand(2);
9218 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9224 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9228 if (Res.first.getNode()) {
9229 processIntegerCallValue(
I, Res.first,
true);
9243 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9244 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9266 switch (NumBitsToCompare) {
9278 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9291 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9292 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9296 processIntegerCallValue(
I, Cmp,
false);
9305bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9306 const Value *Src =
I.getArgOperand(0);
9307 const Value *
Char =
I.getArgOperand(1);
9308 const Value *
Length =
I.getArgOperand(2);
9310 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9311 std::pair<SDValue, SDValue> Res =
9314 MachinePointerInfo(Src));
9315 if (Res.first.getNode()) {
9329bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9334 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9335 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9337 Align Alignment = std::min(DstAlign, SrcAlign);
9346 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9347 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9348 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9350 "** memcpy should not be lowered as TailCall in mempcpy context **");
9354 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9367bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9368 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9370 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9371 std::pair<SDValue, SDValue> Res =
9374 MachinePointerInfo(Arg0),
9375 MachinePointerInfo(Arg1), isStpcpy);
9376 if (Res.first.getNode()) {
9378 DAG.setRoot(Res.second);
9390bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9391 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9393 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9394 std::pair<SDValue, SDValue> Res =
9397 MachinePointerInfo(Arg0),
9398 MachinePointerInfo(Arg1));
9399 if (Res.first.getNode()) {
9400 processIntegerCallValue(
I, Res.first,
true);
9413bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9414 const Value *Arg0 =
I.getArgOperand(0);
9416 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9419 if (Res.first.getNode()) {
9420 processIntegerCallValue(
I, Res.first,
false);
9433bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9434 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9436 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9437 std::pair<SDValue, SDValue> Res =
9440 MachinePointerInfo(Arg0));
9441 if (Res.first.getNode()) {
9442 processIntegerCallValue(
I, Res.first,
false);
9455bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9460 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9477bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9482 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9495void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9497 if (
I.isInlineAsm()) {
9504 if (Function *
F =
I.getCalledFunction()) {
9505 if (
F->isDeclaration()) {
9507 if (
unsigned IID =
F->getIntrinsicID()) {
9508 visitIntrinsicCall(
I, IID);
9517 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9518 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9522 if (visitMemCmpBCmpCall(
I))
9525 case LibFunc_copysign:
9526 case LibFunc_copysignf:
9527 case LibFunc_copysignl:
9530 if (
I.onlyReadsMemory()) {
9541 if (visitUnaryFloatCall(
I, ISD::FABS))
9547 if (visitBinaryFloatCall(
I, ISD::FMINNUM))
9553 if (visitBinaryFloatCall(
I, ISD::FMAXNUM))
9556 case LibFunc_fminimum_num:
9557 case LibFunc_fminimum_numf:
9558 case LibFunc_fminimum_numl:
9559 if (visitBinaryFloatCall(
I, ISD::FMINIMUMNUM))
9562 case LibFunc_fmaximum_num:
9563 case LibFunc_fmaximum_numf:
9564 case LibFunc_fmaximum_numl:
9565 if (visitBinaryFloatCall(
I, ISD::FMAXIMUMNUM))
9571 if (visitUnaryFloatCall(
I, ISD::FSIN))
9577 if (visitUnaryFloatCall(
I, ISD::FCOS))
9583 if (visitUnaryFloatCall(
I, ISD::FTAN))
9589 if (visitUnaryFloatCall(
I, ISD::FASIN))
9595 if (visitUnaryFloatCall(
I, ISD::FACOS))
9601 if (visitUnaryFloatCall(
I, ISD::FATAN))
9605 case LibFunc_atan2f:
9606 case LibFunc_atan2l:
9607 if (visitBinaryFloatCall(
I, ISD::FATAN2))
9613 if (visitUnaryFloatCall(
I, ISD::FSINH))
9619 if (visitUnaryFloatCall(
I, ISD::FCOSH))
9625 if (visitUnaryFloatCall(
I, ISD::FTANH))
9631 case LibFunc_sqrt_finite:
9632 case LibFunc_sqrtf_finite:
9633 case LibFunc_sqrtl_finite:
9634 if (visitUnaryFloatCall(
I, ISD::FSQRT))
9638 case LibFunc_floorf:
9639 case LibFunc_floorl:
9640 if (visitUnaryFloatCall(
I, ISD::FFLOOR))
9643 case LibFunc_nearbyint:
9644 case LibFunc_nearbyintf:
9645 case LibFunc_nearbyintl:
9646 if (visitUnaryFloatCall(
I, ISD::FNEARBYINT))
9652 if (visitUnaryFloatCall(
I, ISD::FCEIL))
9658 if (visitUnaryFloatCall(
I, ISD::FRINT))
9662 case LibFunc_roundf:
9663 case LibFunc_roundl:
9664 if (visitUnaryFloatCall(
I, ISD::FROUND))
9668 case LibFunc_truncf:
9669 case LibFunc_truncl:
9670 if (visitUnaryFloatCall(
I, ISD::FTRUNC))
9676 if (visitUnaryFloatCall(
I, ISD::FLOG2))
9682 if (visitUnaryFloatCall(
I, ISD::FEXP2))
9686 case LibFunc_exp10f:
9687 case LibFunc_exp10l:
9688 if (visitUnaryFloatCall(
I, ISD::FEXP10))
9692 case LibFunc_ldexpf:
9693 case LibFunc_ldexpl:
9694 if (visitBinaryFloatCall(
I, ISD::FLDEXP))
9697 case LibFunc_memcmp:
9698 if (visitMemCmpBCmpCall(
I))
9701 case LibFunc_mempcpy:
9702 if (visitMemPCpyCall(
I))
9705 case LibFunc_memchr:
9706 if (visitMemChrCall(
I))
9709 case LibFunc_strcpy:
9710 if (visitStrCpyCall(
I,
false))
9713 case LibFunc_stpcpy:
9714 if (visitStrCpyCall(
I,
true))
9717 case LibFunc_strcmp:
9718 if (visitStrCmpCall(
I))
9721 case LibFunc_strlen:
9722 if (visitStrLenCall(
I))
9725 case LibFunc_strnlen:
9726 if (visitStrNLenCall(
I))
9750 if (
I.hasDeoptState())
9767 const Value *Discriminator = PAB->Inputs[1];
9769 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9770 assert(Discriminator->getType()->isIntegerTy(64) &&
9771 "Invalid ptrauth discriminator");
9776 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9777 DAG.getDataLayout()))
9817 for (
const auto &Code : Codes)
9832 SDISelAsmOperandInfo &MatchingOpInfo,
9834 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9840 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9842 OpInfo.ConstraintVT);
9843 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9845 MatchingOpInfo.ConstraintVT);
9846 const bool OutOpIsIntOrFP =
9847 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9848 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9849 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9850 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9853 " with a matching output constraint of"
9854 " incompatible type!");
9856 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9863 SDISelAsmOperandInfo &OpInfo,
9876 const Value *OpVal = OpInfo.CallOperandVal;
9894 DL.getPrefTypeAlign(Ty),
false,
9897 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9900 OpInfo.CallOperand = StackSlot;
9913static std::optional<unsigned>
9915 SDISelAsmOperandInfo &OpInfo,
9916 SDISelAsmOperandInfo &RefOpInfo) {
9927 return std::nullopt;
9931 unsigned AssignedReg;
9934 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9937 return std::nullopt;
9942 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9944 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9953 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9958 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9963 OpInfo.CallOperand =
9964 DAG.
getNode(ISD::BITCAST,
DL, RegVT, OpInfo.CallOperand);
9965 OpInfo.ConstraintVT = RegVT;
9969 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9972 OpInfo.CallOperand =
9973 DAG.
getNode(ISD::BITCAST,
DL, VT, OpInfo.CallOperand);
9974 OpInfo.ConstraintVT = VT;
9981 if (OpInfo.isMatchingInputConstraint())
9982 return std::nullopt;
9984 EVT ValueVT = OpInfo.ConstraintVT;
9985 if (OpInfo.ConstraintVT == MVT::Other)
9989 unsigned NumRegs = 1;
9990 if (OpInfo.ConstraintVT != MVT::Other)
10005 I = std::find(
I, RC->
end(), AssignedReg);
10006 if (
I == RC->
end()) {
10009 return {AssignedReg};
10013 for (; NumRegs; --NumRegs, ++
I) {
10014 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10019 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10020 return std::nullopt;
10025 const std::vector<SDValue> &AsmNodeOperands) {
10028 for (; OperandNo; --OperandNo) {
10030 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10033 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10034 "Skipped past definitions?");
10035 CurOp +=
F.getNumOperandRegisters() + 1;
10043 unsigned Flags = 0;
10046 explicit ExtraFlags(
const CallBase &
Call) {
10048 if (
IA->hasSideEffects())
10050 if (
IA->isAlignStack())
10057 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10073 unsigned get()
const {
return Flags; }
10096void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10103 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10105 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10109 bool HasSideEffect =
IA->hasSideEffects();
10110 ExtraFlags ExtraInfo(
Call);
10112 for (
auto &
T : TargetConstraints) {
10113 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10114 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10116 if (OpInfo.CallOperandVal)
10117 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10119 if (!HasSideEffect)
10120 HasSideEffect = OpInfo.hasMemory(TLI);
10132 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10133 "' expects an integer constant "
10136 ExtraInfo.update(
T);
10144 if (EmitEHLabels) {
10145 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10149 if (IsCallBr || EmitEHLabels) {
10157 if (EmitEHLabels) {
10158 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10163 IA->collectAsmStrs(AsmStrs);
10166 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10174 if (OpInfo.hasMatchingInput()) {
10175 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10206 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10209 OpInfo.isIndirect =
false;
10216 !OpInfo.isIndirect) {
10217 assert((OpInfo.isMultipleAlternative ||
10219 "Can only indirectify direct input operands!");
10225 OpInfo.CallOperandVal =
nullptr;
10228 OpInfo.isIndirect =
true;
10234 std::vector<SDValue> AsmNodeOperands;
10235 AsmNodeOperands.push_back(
SDValue());
10236 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10243 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10247 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10252 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10254 SDISelAsmOperandInfo &RefOpInfo =
10255 OpInfo.isMatchingInputConstraint()
10256 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10258 const auto RegError =
10261 const MachineFunction &MF =
DAG.getMachineFunction();
10263 const char *
RegName =
TRI.getName(*RegError);
10264 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10265 "' allocated for constraint '" +
10266 Twine(OpInfo.ConstraintCode) +
10267 "' does not match required type");
10271 auto DetectWriteToReservedRegister = [&]() {
10272 const MachineFunction &MF =
DAG.getMachineFunction();
10277 emitInlineAsmError(
Call,
"write to reserved register '" +
10286 !OpInfo.isMatchingInputConstraint())) &&
10287 "Only address as input operand is allowed.");
10289 switch (OpInfo.Type) {
10295 "Failed to convert memory constraint code to constraint id.");
10299 OpFlags.setMemConstraint(ConstraintID);
10300 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10302 AsmNodeOperands.push_back(OpInfo.CallOperand);
10307 if (OpInfo.AssignedRegs.
Regs.empty()) {
10308 emitInlineAsmError(
10309 Call,
"couldn't allocate output register for constraint '" +
10310 Twine(OpInfo.ConstraintCode) +
"'");
10314 if (DetectWriteToReservedRegister())
10328 SDValue InOperandVal = OpInfo.CallOperand;
10330 if (OpInfo.isMatchingInputConstraint()) {
10335 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10336 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10337 if (OpInfo.isIndirect) {
10339 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10340 "don't know how to handle tied "
10341 "indirect register inputs");
10346 MachineFunction &MF =
DAG.getMachineFunction();
10351 MVT RegVT =
R->getSimpleValueType(0);
10352 const TargetRegisterClass *RC =
10355 :
TRI.getMinimalPhysRegClass(TiedReg);
10356 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10359 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10363 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10365 OpInfo.getMatchedOperand(), dl,
DAG,
10370 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10371 assert(
Flag.getNumOperandRegisters() == 1 &&
10372 "Unexpected number of operands");
10375 Flag.clearMemConstraint();
10376 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10377 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10379 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10390 std::vector<SDValue>
Ops;
10396 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10397 Twine(OpInfo.ConstraintCode) +
"'");
10401 emitInlineAsmError(
Call,
10402 "invalid operand for inline asm constraint '" +
10403 Twine(OpInfo.ConstraintCode) +
"'");
10409 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10416 assert((OpInfo.isIndirect ||
10418 "Operand must be indirect to be a mem!");
10421 "Memory operands expect pointer values");
10426 "Failed to convert memory constraint code to constraint id.");
10430 ResOpType.setMemConstraint(ConstraintID);
10431 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10434 AsmNodeOperands.push_back(InOperandVal);
10442 "Failed to convert memory constraint code to constraint id.");
10446 SDValue AsmOp = InOperandVal;
10450 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10456 ResOpType.setMemConstraint(ConstraintID);
10458 AsmNodeOperands.push_back(
10461 AsmNodeOperands.push_back(AsmOp);
10467 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10468 Twine(OpInfo.ConstraintCode) +
"'");
10473 if (OpInfo.isIndirect) {
10474 emitInlineAsmError(
10475 Call,
"Don't know how to handle indirect register inputs yet "
10476 "for constraint '" +
10477 Twine(OpInfo.ConstraintCode) +
"'");
10482 if (OpInfo.AssignedRegs.
Regs.empty()) {
10483 emitInlineAsmError(
Call,
10484 "couldn't allocate input reg for constraint '" +
10485 Twine(OpInfo.ConstraintCode) +
"'");
10489 if (DetectWriteToReservedRegister())
10498 0, dl,
DAG, AsmNodeOperands);
10504 if (!OpInfo.AssignedRegs.
Regs.empty())
10514 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10516 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10518 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10530 ResultTypes = StructResult->elements();
10531 else if (!CallResultType->
isVoidTy())
10532 ResultTypes =
ArrayRef(CallResultType);
10534 auto CurResultType = ResultTypes.
begin();
10535 auto handleRegAssign = [&](
SDValue V) {
10536 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10537 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10538 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10550 if (ResultVT !=
V.getValueType() &&
10553 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10554 V.getValueType().isInteger()) {
10560 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10566 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10570 if (OpInfo.AssignedRegs.
Regs.empty())
10573 switch (OpInfo.ConstraintType) {
10577 Chain, &Glue, &
Call);
10589 assert(
false &&
"Unexpected unknown constraint");
10593 if (OpInfo.isIndirect) {
10594 const Value *Ptr = OpInfo.CallOperandVal;
10595 assert(Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10597 MachinePointerInfo(Ptr));
10604 handleRegAssign(V);
10606 handleRegAssign(Val);
10612 if (!ResultValues.
empty()) {
10613 assert(CurResultType == ResultTypes.
end() &&
10614 "Mismatch in number of ResultTypes");
10616 "Mismatch in number of output operands in asm result");
10619 DAG.getVTList(ResultVTs), ResultValues);
10624 if (!OutChains.
empty())
10627 if (EmitEHLabels) {
10632 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10634 DAG.setRoot(Chain);
10637void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10638 const Twine &Message) {
10639 LLVMContext &Ctx = *
DAG.getContext();
10643 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10647 if (ValueVTs.
empty())
10651 for (
const EVT &VT : ValueVTs)
10652 Ops.push_back(
DAG.getUNDEF(VT));
10657void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10661 DAG.getSrcValue(
I.getArgOperand(0))));
10664void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10665 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10666 const DataLayout &
DL =
DAG.getDataLayout();
10670 DL.getABITypeAlign(
I.getType()).value());
10671 DAG.setRoot(
V.getValue(1));
10673 if (
I.getType()->isPointerTy())
10674 V =
DAG.getPtrExtOrTrunc(
10679void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10683 DAG.getSrcValue(
I.getArgOperand(0))));
10686void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10691 DAG.getSrcValue(
I.getArgOperand(0)),
10692 DAG.getSrcValue(
I.getArgOperand(1))));
10698 std::optional<ConstantRange> CR =
getRange(
I);
10700 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10703 APInt Lo = CR->getUnsignedMin();
10704 if (!
Lo.isMinValue())
10707 APInt Hi = CR->getUnsignedMax();
10708 unsigned Bits = std::max(
Hi.getActiveBits(),
10716 DAG.getValueType(SmallVT));
10717 unsigned NumVals =
Op.getNode()->getNumValues();
10723 Ops.push_back(ZExt);
10724 for (
unsigned I = 1;
I != NumVals; ++
I)
10725 Ops.push_back(
Op.getValue(
I));
10727 return DAG.getMergeValues(
Ops,
SL);
10737 SDValue TestConst =
DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32);
10745 for (
unsigned I = 0, E =
Ops.size();
I != E; ++
I) {
10748 MergeOp, TestConst);
10751 return DAG.getMergeValues(
Ops,
SL);
10762 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10765 Args.reserve(NumArgs);
10769 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10770 ArgI != ArgE; ++ArgI) {
10771 const Value *V =
Call->getOperand(ArgI);
10773 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10776 Entry.setAttributes(
Call, ArgI);
10777 Args.push_back(Entry);
10782 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10811 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10820 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10826void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10852 Ops.push_back(Chain);
10853 Ops.push_back(InGlue);
10860 assert(
ID.getValueType() == MVT::i64);
10862 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10863 Ops.push_back(IDConst);
10869 Ops.push_back(ShadConst);
10875 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10876 Chain =
DAG.getNode(ISD::STACKMAP,
DL, NodeTys,
Ops);
10879 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10884 DAG.setRoot(Chain);
10887 FuncInfo.MF->getFrameInfo().setHasStackMap();
10891void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10908 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10911 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10912 SDLoc(SymbolicCallee),
10913 SymbolicCallee->getValueType(0));
10923 "Not enough arguments provided to the patchpoint intrinsic");
10926 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10930 TargetLowering::CallLoweringInfo CLI(
DAG);
10935 SDNode *CallEnd =
Result.second.getNode();
10936 if (CallEnd->
getOpcode() == ISD::EH_LABEL)
10944 "Expected a callseq node.");
10946 bool HasGlue =
Call->getGluedNode();
10971 Ops.push_back(Callee);
10977 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10978 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10981 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10986 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10997 if (IsAnyRegCC && HasDef) {
10999 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11002 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
11007 NodeTys =
DAG.getVTList(ValueVTs);
11009 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11012 SDValue PPV =
DAG.getNode(ISD::PATCHPOINT, dl, NodeTys,
Ops);
11026 if (IsAnyRegCC && HasDef) {
11029 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11035 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11038void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11040 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11043 if (
I.arg_size() > 1)
11048 SDNodeFlags SDFlags;
11052 switch (Intrinsic) {
11053 case Intrinsic::vector_reduce_fadd:
11056 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
11059 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
11061 case Intrinsic::vector_reduce_fmul:
11064 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
11067 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
11069 case Intrinsic::vector_reduce_add:
11070 Res =
DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
11072 case Intrinsic::vector_reduce_mul:
11073 Res =
DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
11075 case Intrinsic::vector_reduce_and:
11076 Res =
DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
11078 case Intrinsic::vector_reduce_or:
11079 Res =
DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
11081 case Intrinsic::vector_reduce_xor:
11082 Res =
DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
11084 case Intrinsic::vector_reduce_smax:
11085 Res =
DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
11087 case Intrinsic::vector_reduce_smin:
11088 Res =
DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
11090 case Intrinsic::vector_reduce_umax:
11091 Res =
DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
11093 case Intrinsic::vector_reduce_umin:
11094 Res =
DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
11096 case Intrinsic::vector_reduce_fmax:
11097 Res =
DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
11099 case Intrinsic::vector_reduce_fmin:
11100 Res =
DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
11102 case Intrinsic::vector_reduce_fmaximum:
11103 Res =
DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
11105 case Intrinsic::vector_reduce_fminimum:
11106 Res =
DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
11119 Attrs.push_back(Attribute::SExt);
11121 Attrs.push_back(Attribute::ZExt);
11123 Attrs.push_back(Attribute::InReg);
11125 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11133std::pair<SDValue, SDValue>
11147 "Only supported for non-aggregate returns");
11150 for (
Type *Ty : RetOrigTys)
11159 RetOrigTys.
swap(OldRetOrigTys);
11160 RetVTs.
swap(OldRetVTs);
11161 Offsets.swap(OldOffsets);
11163 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11164 EVT RetVT = OldRetVTs[i];
11168 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11169 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11170 RetVTs.
append(NumRegs, RegisterVT);
11171 for (
unsigned j = 0; j != NumRegs; ++j)
11184 int DemoteStackIdx = -100;
11197 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11198 Entry.IsSRet =
true;
11199 Entry.Alignment = Alignment;
11211 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11213 if (NeedsRegBlock) {
11214 Flags.setInConsecutiveRegs();
11215 if (
I == RetVTs.
size() - 1)
11216 Flags.setInConsecutiveRegsLast();
11218 EVT VT = RetVTs[
I];
11222 for (
unsigned i = 0; i != NumRegs; ++i) {
11226 Ret.Flags.setPointer();
11227 Ret.Flags.setPointerAddrSpace(
11231 Ret.Flags.setSExt();
11233 Ret.Flags.setZExt();
11235 Ret.Flags.setInReg();
11236 CLI.
Ins.push_back(Ret);
11245 if (Arg.IsSwiftError) {
11251 CLI.
Ins.push_back(Ret);
11259 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11263 Type *FinalType = Args[i].Ty;
11264 if (Args[i].IsByVal)
11265 FinalType = Args[i].IndirectType;
11268 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11271 Type *ArgTy = OrigArgTy;
11272 if (Args[i].Ty != Args[i].OrigTy) {
11273 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11274 ArgTy = Args[i].Ty;
11279 Args[i].Node.getResNo() +
Value);
11286 Flags.setOrigAlign(OriginalAlignment);
11291 Flags.setPointer();
11294 if (Args[i].IsZExt)
11296 if (Args[i].IsSExt)
11298 if (Args[i].IsNoExt)
11300 if (Args[i].IsInReg) {
11307 Flags.setHvaStart();
11313 if (Args[i].IsSRet)
11315 if (Args[i].IsSwiftSelf)
11316 Flags.setSwiftSelf();
11317 if (Args[i].IsSwiftAsync)
11318 Flags.setSwiftAsync();
11319 if (Args[i].IsSwiftError)
11320 Flags.setSwiftError();
11321 if (Args[i].IsCFGuardTarget)
11322 Flags.setCFGuardTarget();
11323 if (Args[i].IsByVal)
11325 if (Args[i].IsByRef)
11327 if (Args[i].IsPreallocated) {
11328 Flags.setPreallocated();
11336 if (Args[i].IsInAlloca) {
11337 Flags.setInAlloca();
11346 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11347 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11348 Flags.setByValSize(FrameSize);
11351 if (
auto MA = Args[i].Alignment)
11355 }
else if (
auto MA = Args[i].Alignment) {
11358 MemAlign = OriginalAlignment;
11360 Flags.setMemAlign(MemAlign);
11361 if (Args[i].IsNest)
11364 Flags.setInConsecutiveRegs();
11367 unsigned NumParts =
11372 if (Args[i].IsSExt)
11374 else if (Args[i].IsZExt)
11379 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11384 Args[i].Ty->getPointerAddressSpace())) &&
11385 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11398 CLI.
RetZExt == Args[i].IsZExt))
11399 Flags.setReturned();
11405 for (
unsigned j = 0; j != NumParts; ++j) {
11411 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11412 if (NumParts > 1 && j == 0)
11416 if (j == NumParts - 1)
11420 CLI.
Outs.push_back(MyFlags);
11421 CLI.
OutVals.push_back(Parts[j]);
11424 if (NeedsRegBlock &&
Value == NumValues - 1)
11425 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11437 "LowerCall didn't return a valid chain!");
11439 "LowerCall emitted a return value for a tail call!");
11441 "LowerCall didn't emit the correct number of values!");
11453 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11454 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11455 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11456 "LowerCall emitted a value with the wrong type!");
11466 unsigned NumValues = RetVTs.
size();
11467 ReturnValues.
resize(NumValues);
11474 for (
unsigned i = 0; i < NumValues; ++i) {
11481 DemoteStackIdx, Offsets[i]),
11483 ReturnValues[i] = L;
11484 Chains[i] = L.getValue(1);
11491 std::optional<ISD::NodeType> AssertOp;
11496 unsigned CurReg = 0;
11497 for (
EVT VT : RetVTs) {
11503 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11511 if (ReturnValues.
empty())
11517 return std::make_pair(Res, CLI.
Chain);
11534 if (
N->getNumValues() == 1) {
11542 "Lowering returned the wrong number of results!");
11545 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11559 "Copy from a reg to the same reg!");
11560 assert(!Reg.isPhysical() &&
"Is a physreg");
11566 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11571 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11572 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11573 ExtendType = PreferredExtendIt->second;
11576 PendingExports.push_back(Chain);
11588 return A->use_empty();
11590 const BasicBlock &Entry =
A->getParent()->front();
11591 for (
const User *U :
A->users())
11600 std::pair<const AllocaInst *, const StoreInst *>>;
11612 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11614 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11615 StaticAllocas.
reserve(NumArgs * 2);
11617 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11620 V = V->stripPointerCasts();
11622 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11625 return &Iter.first->second;
11642 if (
I.isDebugOrPseudoInst())
11646 for (
const Use &U :
I.operands()) {
11647 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11648 *
Info = StaticAllocaInfo::Clobbered;
11654 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11655 *
Info = StaticAllocaInfo::Clobbered;
11658 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11659 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11665 if (*
Info != StaticAllocaInfo::Unknown)
11673 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11675 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11679 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11680 ArgCopyElisionCandidates.count(Arg)) {
11681 *
Info = StaticAllocaInfo::Clobbered;
11685 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11689 *
Info = StaticAllocaInfo::Elidable;
11690 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11695 if (ArgCopyElisionCandidates.size() == NumArgs)
11719 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11720 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11721 const AllocaInst *AI = ArgCopyIter->second.first;
11722 int FixedIndex = FINode->getIndex();
11724 int OldIndex = AllocaIndex;
11728 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11734 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11735 "greater than stack argument alignment ("
11736 <<
DebugStr(RequiredAlignment) <<
" vs "
11744 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11745 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11751 AllocaIndex = FixedIndex;
11752 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11753 for (
SDValue ArgVal : ArgVals)
11757 const StoreInst *
SI = ArgCopyIter->second.second;
11770void SelectionDAGISel::LowerArguments(
const Function &
F) {
11771 SelectionDAG &DAG =
SDB->DAG;
11772 SDLoc dl =
SDB->getCurSDLoc();
11777 if (
F.hasFnAttribute(Attribute::Naked))
11782 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11784 ISD::ArgFlagsTy
Flags;
11786 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11787 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11789 Ins.push_back(RetArg);
11797 ArgCopyElisionCandidates);
11800 for (
const Argument &Arg :
F.args()) {
11801 unsigned ArgNo = Arg.getArgNo();
11804 bool isArgValueUsed = !Arg.
use_empty();
11805 unsigned PartBase = 0;
11807 if (Arg.hasAttribute(Attribute::ByVal))
11808 FinalType = Arg.getParamByValType();
11809 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11810 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11811 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11814 EVT VT =
TLI->getValueType(
DL, ArgTy);
11815 ISD::ArgFlagsTy
Flags;
11818 Flags.setPointer();
11821 if (Arg.hasAttribute(Attribute::ZExt))
11823 if (Arg.hasAttribute(Attribute::SExt))
11825 if (Arg.hasAttribute(Attribute::InReg)) {
11832 Flags.setHvaStart();
11838 if (Arg.hasAttribute(Attribute::StructRet))
11840 if (Arg.hasAttribute(Attribute::SwiftSelf))
11841 Flags.setSwiftSelf();
11842 if (Arg.hasAttribute(Attribute::SwiftAsync))
11843 Flags.setSwiftAsync();
11844 if (Arg.hasAttribute(Attribute::SwiftError))
11845 Flags.setSwiftError();
11846 if (Arg.hasAttribute(Attribute::ByVal))
11848 if (Arg.hasAttribute(Attribute::ByRef))
11850 if (Arg.hasAttribute(Attribute::InAlloca)) {
11851 Flags.setInAlloca();
11859 if (Arg.hasAttribute(Attribute::Preallocated)) {
11860 Flags.setPreallocated();
11872 const Align OriginalAlignment(
11873 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11874 Flags.setOrigAlign(OriginalAlignment);
11877 Type *ArgMemTy =
nullptr;
11878 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11881 ArgMemTy = Arg.getPointeeInMemoryValueType();
11883 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11888 if (
auto ParamAlign = Arg.getParamStackAlign())
11889 MemAlign = *ParamAlign;
11890 else if ((ParamAlign = Arg.getParamAlign()))
11891 MemAlign = *ParamAlign;
11893 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11894 if (
Flags.isByRef())
11895 Flags.setByRefSize(MemSize);
11897 Flags.setByValSize(MemSize);
11898 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11899 MemAlign = *ParamAlign;
11901 MemAlign = OriginalAlignment;
11903 Flags.setMemAlign(MemAlign);
11905 if (Arg.hasAttribute(Attribute::Nest))
11908 Flags.setInConsecutiveRegs();
11909 if (ArgCopyElisionCandidates.count(&Arg))
11910 Flags.setCopyElisionCandidate();
11911 if (Arg.hasAttribute(Attribute::Returned))
11912 Flags.setReturned();
11914 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11915 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11916 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11917 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11918 for (
unsigned i = 0; i != NumRegs; ++i) {
11922 ISD::InputArg MyFlags(
11923 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11925 if (NumRegs > 1 && i == 0)
11926 MyFlags.Flags.setSplit();
11929 MyFlags.Flags.setOrigAlign(
Align(1));
11930 if (i == NumRegs - 1)
11931 MyFlags.Flags.setSplitEnd();
11933 Ins.push_back(MyFlags);
11935 if (NeedsRegBlock &&
Value == NumValues - 1)
11936 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11943 SDValue NewRoot =
TLI->LowerFormalArguments(
11944 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11948 "LowerFormalArguments didn't return a valid chain!");
11950 "LowerFormalArguments didn't emit the correct number of values!");
11952 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11954 "LowerFormalArguments emitted a null value!");
11956 "LowerFormalArguments emitted a value with the wrong type!");
11968 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11969 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11970 std::optional<ISD::NodeType> AssertOp;
11973 F.getCallingConv(), AssertOp);
11975 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11976 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11978 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11979 FuncInfo->DemoteRegister = SRetReg;
11981 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11989 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11990 for (
const Argument &Arg :
F.args()) {
11994 unsigned NumValues = ValueVTs.
size();
11995 if (NumValues == 0)
12002 if (Ins[i].
Flags.isCopyElisionCandidate()) {
12003 unsigned NumParts = 0;
12004 for (EVT VT : ValueVTs)
12005 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
12006 F.getCallingConv(), VT);
12010 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12015 bool isSwiftErrorArg =
12016 TLI->supportSwiftError() &&
12017 Arg.hasAttribute(Attribute::SwiftError);
12018 if (!ArgHasUses && !isSwiftErrorArg) {
12019 SDB->setUnusedArgValue(&Arg, InVals[i]);
12022 if (FrameIndexSDNode *FI =
12024 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12027 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12028 EVT VT = ValueVTs[Val];
12029 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12030 F.getCallingConv(), VT);
12031 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12032 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12037 if (ArgHasUses || isSwiftErrorArg) {
12038 std::optional<ISD::NodeType> AssertOp;
12039 if (Arg.hasAttribute(Attribute::SExt))
12041 else if (Arg.hasAttribute(Attribute::ZExt))
12046 NewRoot,
F.getCallingConv(), AssertOp);
12049 if (NoFPClass !=
fcNone) {
12051 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12053 OutVal, SDNoFPClass);
12062 if (ArgValues.
empty())
12066 if (FrameIndexSDNode *FI =
12068 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12071 SDB->getCurSDLoc());
12073 SDB->setValue(&Arg, Res);
12083 if (LoadSDNode *LNode =
12085 if (FrameIndexSDNode *FI =
12087 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12115 FuncInfo->InitializeRegForValue(&Arg);
12116 SDB->CopyToExportRegsIfNeeded(&Arg);
12120 if (!Chains.
empty()) {
12127 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12131 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12132 for (MachineFunction::VariableDbgInfo &VI :
12133 MF->getInStackSlotVariableDbgInfo()) {
12134 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12135 if (
I != ArgCopyElisionFrameIndexMap.
end())
12136 VI.updateStackSlot(
I->second);
12151SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12152 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12154 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12160 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12164 if (!SuccsHandled.
insert(SuccMBB).second)
12172 for (
const PHINode &PN : SuccBB->phis()) {
12174 if (PN.use_empty())
12178 if (PN.getType()->isEmptyTy())
12182 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12187 RegOut =
FuncInfo.CreateRegs(&PN);
12205 "Didn't codegen value into a register!??");
12215 for (EVT VT : ValueVTs) {
12217 for (
unsigned i = 0; i != NumRegisters; ++i)
12219 Reg += NumRegisters;
12239void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12241 if (MaybeTC.
getNode() !=
nullptr)
12242 DAG.setRoot(MaybeTC);
12247void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12250 MachineFunction *CurMF =
FuncInfo.MF;
12251 MachineBasicBlock *NextMBB =
nullptr;
12256 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12258 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12260 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12268 CaseCluster &
Small = *
W.FirstCluster;
12269 CaseCluster &
Big = *
W.LastCluster;
12273 const APInt &SmallValue =
Small.Low->getValue();
12274 const APInt &BigValue =
Big.Low->getValue();
12277 APInt CommonBit = BigValue ^ SmallValue;
12284 DAG.getConstant(CommonBit,
DL, VT));
12286 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12292 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12294 addSuccessorWithProb(
12295 SwitchMBB, DefaultMBB,
12299 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12306 BrCond =
DAG.getNode(ISD::BR,
DL, MVT::Other, BrCond,
12307 DAG.getBasicBlock(DefaultMBB));
12309 DAG.setRoot(BrCond);
12321 [](
const CaseCluster &a,
const CaseCluster &b) {
12322 return a.Prob != b.Prob ?
12324 a.Low->getValue().slt(b.Low->getValue());
12331 if (
I->Prob >
W.LastCluster->Prob)
12333 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12341 BranchProbability DefaultProb =
W.DefaultProb;
12342 BranchProbability UnhandledProbs = DefaultProb;
12344 UnhandledProbs +=
I->Prob;
12346 MachineBasicBlock *CurMBB =
W.MBB;
12348 bool FallthroughUnreachable =
false;
12349 MachineBasicBlock *Fallthrough;
12350 if (
I ==
W.LastCluster) {
12352 Fallthrough = DefaultMBB;
12357 CurMF->
insert(BBI, Fallthrough);
12361 UnhandledProbs -=
I->Prob;
12366 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12367 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12370 MachineBasicBlock *JumpMBB =
JT->MBB;
12371 CurMF->
insert(BBI, JumpMBB);
12373 auto JumpProb =
I->Prob;
12374 auto FallthroughProb = UnhandledProbs;
12382 if (*SI == DefaultMBB) {
12383 JumpProb += DefaultProb / 2;
12384 FallthroughProb -= DefaultProb / 2;
12402 if (FallthroughUnreachable) {
12409 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12410 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12416 JT->Default = Fallthrough;
12419 if (CurMBB == SwitchMBB) {
12427 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12430 for (BitTestCase &BTC : BTB->
Cases)
12442 BTB->
Prob += DefaultProb / 2;
12446 if (FallthroughUnreachable)
12450 if (CurMBB == SwitchMBB) {
12457 const Value *
RHS, *
LHS, *MHS;
12459 if (
I->Low ==
I->High) {
12474 if (FallthroughUnreachable)
12478 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12481 if (CurMBB == SwitchMBB)
12484 SL->SwitchCases.push_back(CB);
12489 CurMBB = Fallthrough;
12493void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12494 const SwitchWorkListItem &W,
12497 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12498 "Clusters not sorted?");
12499 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12501 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12502 SL->computeSplitWorkItemInfo(W);
12507 assert(PivotCluster >
W.FirstCluster);
12508 assert(PivotCluster <=
W.LastCluster);
12513 const ConstantInt *Pivot = PivotCluster->Low;
12522 MachineBasicBlock *LeftMBB;
12523 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12524 FirstLeft->Low ==
W.GE &&
12525 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12526 LeftMBB = FirstLeft->MBB;
12528 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12529 FuncInfo.MF->insert(BBI, LeftMBB);
12531 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12539 MachineBasicBlock *RightMBB;
12540 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12541 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12542 RightMBB = FirstRight->MBB;
12544 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12545 FuncInfo.MF->insert(BBI, RightMBB);
12547 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12553 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12556 if (
W.MBB == SwitchMBB)
12559 SL->SwitchCases.push_back(CB);
12584 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12592 unsigned PeeledCaseIndex = 0;
12593 bool SwitchPeeled =
false;
12594 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12595 CaseCluster &CC = Clusters[
Index];
12596 if (CC.
Prob < TopCaseProb)
12598 TopCaseProb = CC.
Prob;
12599 PeeledCaseIndex =
Index;
12600 SwitchPeeled =
true;
12605 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12606 << TopCaseProb <<
"\n");
12611 MachineBasicBlock *PeeledSwitchMBB =
12613 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12616 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12617 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12618 nullptr,
nullptr, TopCaseProb.
getCompl()};
12619 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12621 Clusters.erase(PeeledCaseIt);
12622 for (CaseCluster &CC : Clusters) {
12624 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12625 << CC.
Prob <<
"\n");
12629 PeeledCaseProb = TopCaseProb;
12630 return PeeledSwitchMBB;
12633void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12635 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12637 Clusters.reserve(
SI.getNumCases());
12638 for (
auto I :
SI.cases()) {
12639 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12640 const ConstantInt *CaseVal =
I.getCaseValue();
12641 BranchProbability Prob =
12643 : BranchProbability(1,
SI.getNumCases() + 1);
12647 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12656 MachineBasicBlock *PeeledSwitchMBB =
12657 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12660 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12661 if (Clusters.empty()) {
12662 assert(PeeledSwitchMBB == SwitchMBB);
12664 if (DefaultMBB != NextBlock(SwitchMBB)) {
12671 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12673 SL->findBitTestClusters(Clusters, &SI);
12676 dbgs() <<
"Case clusters: ";
12677 for (
const CaseCluster &
C : Clusters) {
12683 C.Low->getValue().print(
dbgs(),
true);
12684 if (
C.Low !=
C.High) {
12686 C.High->getValue().print(
dbgs(),
true);
12693 assert(!Clusters.empty());
12697 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12701 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12704 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12706 while (!WorkList.
empty()) {
12708 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12713 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12717 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12721void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12722 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12728void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12729 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12734 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12743 SmallVector<int, 8>
Mask;
12745 for (
unsigned i = 0; i != NumElts; ++i)
12746 Mask.push_back(NumElts - 1 - i);
12751void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12760 EVT OutVT = ValueVTs[0];
12764 for (
unsigned i = 0; i != Factor; ++i) {
12765 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12767 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12773 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12775 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12783 DAG.getVTList(ValueVTs), SubVecs);
12787void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12790 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12795 for (
unsigned i = 0; i < Factor; ++i) {
12798 "Expected VTs to be the same");
12816 for (
unsigned i = 0; i < Factor; ++i)
12823void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12827 unsigned NumValues = ValueVTs.
size();
12828 if (NumValues == 0)
return;
12833 for (
unsigned i = 0; i != NumValues; ++i)
12838 DAG.getVTList(ValueVTs), Values));
12841void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12842 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12854 DAG.getSignedConstant(
12861 uint64_t Idx = (NumElts +
Imm) % NumElts;
12864 SmallVector<int, 8>
Mask;
12865 for (
unsigned i = 0; i < NumElts; ++i)
12866 Mask.push_back(Idx + i);
12894 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12895 "start of copy chain MUST be COPY");
12896 Reg =
MI->getOperand(1).getReg();
12899 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12900 MI =
MRI.def_begin(
Reg)->getParent();
12903 if (
MI->getOpcode() == TargetOpcode::COPY) {
12904 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12905 Reg =
MI->getOperand(1).getReg();
12906 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12909 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12910 "end of copy chain MUST be INLINEASM_BR");
12920void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12926 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12927 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12928 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12936 for (
auto &
T : TargetConstraints) {
12937 SDISelAsmOperandInfo OpInfo(
T);
12945 switch (OpInfo.ConstraintType) {
12956 FuncInfo.MBB->addLiveIn(OriginalDef);
12964 ResultVTs.
push_back(OpInfo.ConstraintVT);
12973 ResultVTs.
push_back(OpInfo.ConstraintVT);
12981 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLowerinInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ LOOP_DEPENDENCE_RAW_MASK
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
Set rounding mode.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISCV vector tuple type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)