79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
114#define DEBUG_TYPE "isel"
122 cl::desc(
"Insert the experimental `assertalign` node."),
127 cl::desc(
"Generate low-precision inline sequences "
128 "for some float libcalls"),
134 cl::desc(
"Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
155 const SDValue *Parts,
unsigned NumParts,
158 std::optional<CallingConv::ID> CC);
167 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
169 std::optional<CallingConv::ID> CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
174 PartVT, ValueVT, CC))
181 assert(NumParts > 0 &&
"No parts to assemble!");
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
199 if (RoundParts > 2) {
203 PartVT, HalfVT, V, InChain);
214 if (RoundParts < NumParts) {
216 unsigned OddParts = NumParts - RoundParts;
219 OddVT, V, InChain, CC);
235 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
246 !PartVT.
isVector() &&
"Unexpected split");
258 if (PartEVT == ValueVT)
262 ValueVT.
bitsLT(PartEVT)) {
275 if (ValueVT.
bitsLT(PartEVT)) {
280 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
295 llvm::Attribute::StrictFP)) {
297 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
309 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
310 ValueVT.
bitsLT(PartEVT)) {
319 const Twine &ErrMsg) {
322 return Ctx.emitError(ErrMsg);
325 if (CI->isInlineAsm()) {
327 *CI, ErrMsg +
", possible invalid constraint for vector type"));
330 return Ctx.emitError(
I, ErrMsg);
339 const SDValue *Parts,
unsigned NumParts,
342 std::optional<CallingConv::ID> CallConv) {
344 assert(NumParts > 0 &&
"No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
354 unsigned NumIntermediates;
359 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
364 NumIntermediates, RegisterVT);
367 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
369 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
372 "Part type sizes don't match!");
376 if (NumIntermediates == NumParts) {
379 for (
unsigned i = 0; i != NumParts; ++i)
381 V, InChain, CallConv);
382 }
else if (NumParts > 0) {
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (
unsigned i = 0; i != NumIntermediates; ++i)
390 IntermediateVT, V, InChain, CallConv);
405 DL, BuiltVectorTy,
Ops);
411 if (PartEVT == ValueVT)
427 "Cannot narrow, it would be a lossy transformation");
433 if (PartEVT == ValueVT)
458 }
else if (ValueVT.
bitsLT(PartEVT)) {
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
498 std::optional<CallingConv::ID> CallConv);
505 unsigned NumParts,
MVT PartVT,
const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
539 assert(NumParts == 1 &&
"Do not know what to promote to!");
550 "Unknown mismatch!");
552 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
558 assert(NumParts == 1 && PartEVT != ValueVT);
564 "Unknown mismatch!");
567 if (PartVT == MVT::x86mmx)
574 "Failed to tile the value with PartVT!");
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
588 if (NumParts & (NumParts - 1)) {
591 "Do not know what to expand to!");
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
603 std::reverse(Parts + RoundParts, Parts + NumParts);
605 NumParts = RoundParts;
617 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (
unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
622 SDValue &Part1 = Parts[i+StepSize/2];
629 if (ThisBits == PartBits && ThisVT != PartVT) {
637 std::reverse(Parts, Parts + OrigNumParts);
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
665 }
else if (PartEVT != ValueEVT) {
680 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
691 std::optional<CallingConv::ID> CallConv) {
695 const bool IsABIRegCopy = CallConv.has_value();
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
745 "lossy conversion of vector to scalar type");
760 unsigned NumIntermediates;
764 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
769 NumIntermediates, RegisterVT);
772 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
774 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
777 "Mixing scalable and fixed vectors when copying in parts");
779 std::optional<ElementCount> DestEltCnt;
789 if (ValueVT == BuiltVectorTy) {
813 for (
unsigned i = 0; i != NumIntermediates; ++i) {
828 if (NumParts == NumIntermediates) {
831 for (
unsigned i = 0; i != NumParts; ++i)
833 }
else if (NumParts > 0) {
836 assert(NumIntermediates != 0 &&
"division by zero");
837 assert(NumParts % NumIntermediates == 0 &&
838 "Must expand into a divisible number of parts!");
839 unsigned Factor = NumParts / NumIntermediates;
840 for (
unsigned i = 0; i != NumIntermediates; ++i)
848 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
852 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
855 OS << LS << U.getTagName();
858 Twine(
"cannot lower ", Name)
864 EVT valuevt, std::optional<CallingConv::ID> CC)
870 std::optional<CallingConv::ID> CC) {
884 for (
unsigned i = 0; i != NumRegs; ++i)
885 Regs.push_back(Reg + i);
886 RegVTs.push_back(RegisterVT);
888 Reg = Reg.id() + NumRegs;
915 for (
unsigned i = 0; i != NumRegs; ++i) {
921 *Glue =
P.getValue(2);
924 Chain =
P.getValue(1);
952 EVT FromVT(MVT::Other);
956 }
else if (NumSignBits > 1) {
964 assert(FromVT != MVT::Other);
970 RegisterVT, ValueVT, V, Chain,
CallConv);
986 unsigned NumRegs =
Regs.size();
1000 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1006 for (
unsigned i = 0; i != NumRegs; ++i) {
1018 if (NumRegs == 1 || Glue)
1029 Chain = Chains[NumRegs-1];
1035 unsigned MatchingIdx,
const SDLoc &dl,
1037 std::vector<SDValue> &
Ops)
const {
1042 Flag.setMatchingOp(MatchingIdx);
1043 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1051 Flag.setRegClass(RC->
getID());
1062 "No 1:1 mapping from clobbers to regs?");
1065 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1070 "If we clobbered the stack pointer, MFI should know about it.");
1079 for (
unsigned i = 0; i != NumRegs; ++i) {
1080 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1092 unsigned RegCount = std::get<0>(CountAndVT);
1093 MVT RegisterVT = std::get<1>(CountAndVT);
1111 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1113 *
DAG.getMachineFunction().getFunction().getParent());
1118 UnusedArgNodeMap.clear();
1120 PendingExports.clear();
1121 PendingConstrainedFP.clear();
1122 PendingConstrainedFPStrict.clear();
1130 DanglingDebugInfoMap.clear();
1137 if (Pending.
empty())
1143 unsigned i = 0, e = Pending.
size();
1144 for (; i != e; ++i) {
1146 if (Pending[i].
getNode()->getOperand(0) == Root)
1154 if (Pending.
size() == 1)
1181 if (!PendingConstrainedFPStrict.empty()) {
1182 assert(PendingConstrainedFP.empty());
1183 updateRoot(PendingConstrainedFPStrict);
1196 if (!PendingConstrainedFP.empty()) {
1197 assert(PendingConstrainedFPStrict.empty());
1198 updateRoot(PendingConstrainedFP);
1202 return DAG.getRoot();
1210 PendingConstrainedFP.size() +
1211 PendingConstrainedFPStrict.size());
1213 PendingConstrainedFP.end());
1214 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1215 PendingConstrainedFPStrict.end());
1216 PendingConstrainedFP.clear();
1217 PendingConstrainedFPStrict.clear();
1224 PendingExports.append(PendingConstrainedFPStrict.begin(),
1225 PendingConstrainedFPStrict.end());
1226 PendingConstrainedFPStrict.clear();
1227 return updateRoot(PendingExports);
1234 assert(Variable &&
"Missing variable");
1241 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1257 if (IsParameter && FINode) {
1259 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1260 true,
DL, SDNodeOrder);
1265 FuncArgumentDbgValueKind::Declare,
N);
1268 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1269 true,
DL, SDNodeOrder);
1271 DAG.AddDbgValue(SDV, IsParameter);
1276 FuncArgumentDbgValueKind::Declare,
N)) {
1278 <<
" (could not emit func-arg dbg_value)\n");
1289 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1291 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1293 if (It->Values.isKillLocation(It->Expr)) {
1299 It->Values.hasArgList())) {
1302 FnVarLocs->getDILocalVariable(It->VariableID),
1303 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1316 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1319 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1321 assert(DLR->getLabel() &&
"Missing label");
1323 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1324 DAG.AddDbgLabel(SDV);
1328 if (SkipDbgVariableRecords)
1336 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1338 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1347 if (Values.
empty()) {
1364 SDNodeOrder, IsVariadic)) {
1375 if (
I.isTerminator()) {
1376 HandlePHINodesInSuccessorBlocks(
I.getParent());
1383 bool NodeInserted =
false;
1384 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1385 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1386 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1387 if (PCSectionsMD || MMRA) {
1388 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1389 DAG, [&](
SDNode *) { NodeInserted =
true; });
1399 if (PCSectionsMD || MMRA) {
1400 auto It = NodeMap.find(&
I);
1401 if (It != NodeMap.end()) {
1403 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1405 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1406 }
else if (NodeInserted) {
1409 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1410 <<
I.getModule()->getName() <<
"]\n";
1419void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1429#define HANDLE_INST(NUM, OPCODE, CLASS) \
1430 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1431#include "llvm/IR/Instruction.def"
1443 for (
const Value *V : Values) {
1468 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1473 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1474 DIVariable *DanglingVariable = DDI.getVariable();
1476 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1478 << printDDI(
nullptr, DDI) <<
"\n");
1484 for (
auto &DDIMI : DanglingDebugInfoMap) {
1485 DanglingDebugInfoVector &DDIV = DDIMI.second;
1489 for (
auto &DDI : DDIV)
1490 if (isMatchingDbgValue(DDI))
1493 erase_if(DDIV, isMatchingDbgValue);
1501 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1502 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1505 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1506 for (
auto &DDI : DDIV) {
1508 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1511 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1512 "Expected inlined-at fields to agree");
1522 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1523 FuncArgumentDbgValueKind::Value, Val)) {
1525 << printDDI(V, DDI) <<
"\n");
1532 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1533 << ValSDNodeOrder <<
"\n");
1534 SDV = getDbgValue(Val, Variable, Expr,
DL,
1535 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1536 DAG.AddDbgValue(SDV,
false);
1540 <<
" in EmitFuncArgumentDbgValue\n");
1542 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1546 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1547 DAG.AddDbgValue(SDV,
false);
1554 DanglingDebugInfo &DDI) {
1559 const Value *OrigV = V;
1563 unsigned SDOrder = DDI.getSDNodeOrder();
1567 bool StackValue =
true;
1592 if (!AdditionalValues.
empty())
1602 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1603 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1611 assert(OrigV &&
"V shouldn't be null");
1613 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1614 DAG.AddDbgValue(SDV,
false);
1616 << printDDI(OrigV, DDI) <<
"\n");
1633 unsigned Order,
bool IsVariadic) {
1638 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1643 for (
const Value *V : Values) {
1653 if (CE->getOpcode() == Instruction::IntToPtr) {
1672 N = UnusedArgNodeMap[V];
1677 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1678 FuncArgumentDbgValueKind::Value,
N))
1705 bool IsParamOfFunc =
1713 auto VMI =
FuncInfo.ValueMap.find(V);
1714 if (VMI !=
FuncInfo.ValueMap.end()) {
1719 V->getType(), std::nullopt);
1725 unsigned BitsToDescribe = 0;
1727 BitsToDescribe = *VarSize;
1729 BitsToDescribe = Fragment->SizeInBits;
1732 if (
Offset >= BitsToDescribe)
1735 unsigned RegisterSize = RegAndSize.second;
1736 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1737 ? BitsToDescribe -
Offset
1740 Expr,
Offset, FragmentSize);
1744 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1745 DAG.AddDbgValue(SDV,
false);
1761 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1762 false, DbgLoc, Order, IsVariadic);
1763 DAG.AddDbgValue(SDV,
false);
1769 for (
auto &Pair : DanglingDebugInfoMap)
1770 for (
auto &DDI : Pair.second)
1781 if (It !=
FuncInfo.ValueMap.end()) {
1785 DAG.getDataLayout(), InReg, Ty,
1802 if (
N.getNode())
return N;
1862 return DAG.getSplatBuildVector(
1865 return DAG.getConstant(*CI,
DL, VT);
1874 getValue(CPA->getAddrDiscriminator()),
1875 getValue(CPA->getDiscriminator()));
1891 visit(CE->getOpcode(), *CE);
1893 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1899 for (
const Use &U :
C->operands()) {
1905 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1906 Constants.push_back(
SDValue(Val, i));
1915 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1919 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1928 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1930 "Unknown struct or array constant!");
1934 unsigned NumElts = ValueVTs.
size();
1938 for (
unsigned i = 0; i != NumElts; ++i) {
1939 EVT EltVT = ValueVTs[i];
1941 Constants[i] =
DAG.getUNDEF(EltVT);
1952 return DAG.getBlockAddress(BA, VT);
1955 return getValue(Equiv->getGlobalValue());
1960 if (VT == MVT::aarch64svcount) {
1961 assert(
C->isNullValue() &&
"Can only zero this target type!");
1967 assert(
C->isNullValue() &&
"Can only zero this target type!");
1984 for (
unsigned i = 0; i != NumElements; ++i)
2012 return DAG.getFrameIndex(
2020 Inst->getType(), std::nullopt);
2034void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2047 if (IsMSVCCXX || IsCoreCLR)
2053 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2054 FuncInfo.MBB->addSuccessor(TargetMBB);
2061 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2070 DAG.getMachineFunction().setHasEHContTarget(
true);
2076 Value *ParentPad =
I.getCatchSwitchParentPad();
2079 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2082 assert(SuccessorColor &&
"No parent funclet for catchret!");
2083 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2084 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2089 DAG.getBasicBlock(SuccessorColorMBB));
2093void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2099 FuncInfo.MBB->setIsEHFuncletEntry();
2100 FuncInfo.MBB->setIsCleanupFuncletEntry();
2129 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2135 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2136 UnwindDests.back().first->setIsEHScopeEntry();
2139 UnwindDests.back().first->setIsEHFuncletEntry();
2143 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2144 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2146 if (IsMSVCCXX || IsCoreCLR)
2147 UnwindDests.back().first->setIsEHFuncletEntry();
2149 UnwindDests.back().first->setIsEHScopeEntry();
2151 NewEHPadBB = CatchSwitch->getUnwindDest();
2157 if (BPI && NewEHPadBB)
2159 EHPadBB = NewEHPadBB;
2166 auto UnwindDest =
I.getUnwindDest();
2167 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2168 BranchProbability UnwindDestProb =
2173 for (
auto &UnwindDest : UnwindDests) {
2174 UnwindDest.first->setIsEHPad();
2175 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2177 FuncInfo.MBB->normalizeSuccProbs();
2180 MachineBasicBlock *CleanupPadMBB =
2181 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2187void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2191void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2192 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2193 auto &
DL =
DAG.getDataLayout();
2205 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2222 SmallVector<uint64_t, 4>
Offsets;
2225 unsigned NumValues = ValueVTs.
size();
2228 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2229 for (
unsigned i = 0; i != NumValues; ++i) {
2236 if (MemVTs[i] != ValueVTs[i])
2238 Chains[i] =
DAG.getStore(
2246 MVT::Other, Chains);
2247 }
else if (
I.getNumOperands() != 0) {
2250 unsigned NumValues =
Types.size();
2254 const Function *
F =
I.getParent()->getParent();
2257 I.getOperand(0)->getType(),
F->getCallingConv(),
2261 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2263 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2266 LLVMContext &
Context =
F->getContext();
2267 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2269 for (
unsigned j = 0;
j != NumValues; ++
j) {
2282 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2285 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2289 if (
I.getOperand(0)->getType()->isPointerTy()) {
2291 Flags.setPointerAddrSpace(
2295 if (NeedsRegBlock) {
2296 Flags.setInConsecutiveRegs();
2297 if (j == NumValues - 1)
2298 Flags.setInConsecutiveRegsLast();
2306 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2309 for (
unsigned i = 0; i < NumParts; ++i) {
2312 VT, Types[j], 0, 0));
2322 const Function *
F =
I.getParent()->getParent();
2324 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2326 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2327 Flags.setSwiftError();
2339 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2341 DAG.getMachineFunction().getFunction().getCallingConv();
2342 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2347 "LowerReturn didn't return a valid chain!");
2358 if (V->getType()->isEmptyTy())
2362 if (VMI !=
FuncInfo.ValueMap.end()) {
2364 "Unused value assigned virtual registers!");
2377 if (
FuncInfo.isExportedInst(V))
return;
2389 if (VI->getParent() == FromBB)
2415 const BasicBlock *SrcBB = Src->getBasicBlock();
2416 const BasicBlock *DstBB = Dst->getBasicBlock();
2420 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2430 Src->addSuccessorWithoutProb(Dst);
2433 Prob = getEdgeProbability(Src, Dst);
2434 Src->addSuccessor(Dst, Prob);
2440 return I->getParent() == BB;
2464 if (CurBB == SwitchBB ||
2470 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2475 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2477 if (FC->hasNoNaNs() ||
2485 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2487 SL->SwitchCases.push_back(CB);
2496 SL->SwitchCases.push_back(CB);
2504 unsigned Depth = 0) {
2513 if (Necessary !=
nullptr) {
2516 if (Necessary->contains(
I))
2535 if (
I.getNumSuccessors() != 2)
2538 if (!
I.isConditional())
2550 if (BPI !=
nullptr) {
2556 std::optional<bool> Likely;
2559 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2563 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2575 if (CostThresh <= 0)
2596 Value *BrCond =
I.getCondition();
2597 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2598 for (
const auto *U : Ins->users()) {
2601 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2614 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2616 for (
const auto &InsPair : RhsDeps) {
2617 if (!ShouldCountInsn(InsPair.first)) {
2618 ToDrop = InsPair.first;
2622 if (ToDrop ==
nullptr)
2624 RhsDeps.erase(ToDrop);
2627 for (
const auto &InsPair : RhsDeps) {
2632 CostOfIncluding +=
TTI->getInstructionCost(
2635 if (CostOfIncluding > CostThresh)
2661 const Value *BOpOp0, *BOpOp1;
2675 if (BOpc == Instruction::And)
2676 BOpc = Instruction::Or;
2677 else if (BOpc == Instruction::Or)
2678 BOpc = Instruction::And;
2684 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2689 TProb, FProb, InvertCond);
2699 if (
Opc == Instruction::Or) {
2720 auto NewTrueProb = TProb / 2;
2721 auto NewFalseProb = TProb / 2 + FProb;
2724 NewFalseProb, InvertCond);
2731 Probs[1], InvertCond);
2733 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2753 auto NewTrueProb = TProb + FProb / 2;
2754 auto NewFalseProb = FProb / 2;
2757 NewFalseProb, InvertCond);
2764 Probs[1], InvertCond);
2773 if (Cases.size() != 2)
return true;
2777 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2778 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2779 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2780 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2786 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2787 Cases[0].CC == Cases[1].CC &&
2790 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2792 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2799void SelectionDAGBuilder::visitUncondBr(
const UncondBrInst &
I) {
2809 if (Succ0MBB != NextBlock(BrMBB) ||
2818void SelectionDAGBuilder::visitCondBr(
const CondBrInst &
I) {
2819 MachineBasicBlock *BrMBB =
FuncInfo.MBB;
2821 MachineBasicBlock *Succ0MBB =
FuncInfo.getMBB(
I.getSuccessor(0));
2825 const Value *CondVal =
I.getCondition();
2826 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2845 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2847 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2850 const Value *BOp0, *BOp1;
2853 Opcode = Instruction::And;
2855 Opcode = Instruction::Or;
2862 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2863 Opcode, BOp0, BOp1))) {
2865 getEdgeProbability(BrMBB, Succ0MBB),
2866 getEdgeProbability(BrMBB, Succ1MBB),
2871 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2875 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2882 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2888 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2889 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2891 SL->SwitchCases.clear();
2897 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2918 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2925 auto &TLI =
DAG.getTargetLoweringInfo();
2949 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2961 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2965 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2966 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2981 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2997 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3000 DAG.setRoot(BrCond);
3006 assert(JT.
SL &&
"Should set SDLoc for SelectionDAG!");
3007 assert(JT.
Reg &&
"Should lower JT Header first!");
3008 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3012 Index.getValue(1), Table, Index);
3013 DAG.setRoot(BrJumpTable);
3021 assert(JT.
SL &&
"Should set SDLoc for SelectionDAG!");
3028 DAG.getConstant(JTH.
First, dl, VT));
3043 JT.
Reg = JumpTableReg;
3051 Sub.getValueType()),
3055 MVT::Other, CopyTo, CMP,
3059 if (JT.
MBB != NextBlock(SwitchBB))
3060 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3061 DAG.getBasicBlock(JT.
MBB));
3063 DAG.setRoot(BrCond);
3066 if (JT.
MBB != NextBlock(SwitchBB))
3068 DAG.getBasicBlock(JT.
MBB)));
3070 DAG.setRoot(CopyTo);
3094 if (PtrTy != PtrMemTy)
3110 auto &
DL =
DAG.getDataLayout();
3119 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3126 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3140 assert(GuardCheckFn &&
"Guard check function is null");
3151 Entry.IsInReg =
true;
3152 Args.push_back(Entry);
3158 getValue(GuardCheckFn), std::move(Args));
3160 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3161 DAG.setRoot(Result.second);
3173 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3179 Guard =
DAG.getPOISON(PtrMemTy);
3221 auto &
DL =
DAG.getDataLayout();
3229 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3235 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3250 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3251 Entry.IsInReg =
true;
3252 Args.push_back(Entry);
3258 getValue(GuardCheckFn), std::move(Args));
3264 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3287 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3291 bool UsePtrType =
false;
3315 if (!
B.FallthroughUnreachable)
3316 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3317 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3321 if (!
B.FallthroughUnreachable) {
3330 DAG.getBasicBlock(
B.Default));
3334 if (
MBB != NextBlock(SwitchBB))
3352 if (PopCount == 1) {
3359 }
else if (PopCount == BB.
Range) {
3367 DAG.getConstant(1, dl, VT), ShiftOp);
3371 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3378 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3380 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3388 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3391 if (NextMBB != NextBlock(SwitchBB))
3392 BrAnd =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrAnd,
3393 DAG.getBasicBlock(NextMBB));
3398void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3416 const Value *Callee(
I.getCalledOperand());
3419 visitInlineAsm(
I, EHPadBB);
3424 case Intrinsic::donothing:
3426 case Intrinsic::seh_try_begin:
3427 case Intrinsic::seh_scope_begin:
3428 case Intrinsic::seh_try_end:
3429 case Intrinsic::seh_scope_end:
3435 case Intrinsic::experimental_patchpoint_void:
3436 case Intrinsic::experimental_patchpoint:
3437 visitPatchpoint(
I, EHPadBB);
3439 case Intrinsic::experimental_gc_statepoint:
3445 case Intrinsic::wasm_throw: {
3447 std::array<SDValue, 4>
Ops = {
3458 case Intrinsic::wasm_rethrow: {
3459 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3460 std::array<SDValue, 2>
Ops = {
3469 }
else if (
I.hasDeoptState()) {
3490 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3491 BranchProbability EHPadBBProb =
3497 addSuccessorWithProb(InvokeMBB, Return);
3498 for (
auto &UnwindDest : UnwindDests) {
3499 UnwindDest.first->setIsEHPad();
3500 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3506 DAG.getBasicBlock(Return)));
3515void SelectionDAGBuilder::visitCallBrIntrinsic(
const CallBrInst &
I) {
3518 DAG.getTargetLoweringInfo().getTgtMemIntrinsic(
3519 Infos,
I,
DAG.getMachineFunction(),
I.getIntrinsicID());
3520 assert(Infos.
empty() &&
"Intrinsic touches memory");
3523 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
3526 getTargetIntrinsicOperands(
I, HasChain, OnlyLoad);
3527 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
3531 getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
3532 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
3537void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3538 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3540 if (
I.isInlineAsm()) {
3547 assert(!
I.hasOperandBundles() &&
3548 "Can't have operand bundles for intrinsics");
3549 visitCallBrIntrinsic(
I);
3554 SmallPtrSet<BasicBlock *, 8> Dests;
3555 Dests.
insert(
I.getDefaultDest());
3565 if (
I.isInlineAsm()) {
3566 for (BasicBlock *Dest :
I.getIndirectDests()) {
3568 Target->setIsInlineAsmBrIndirectTarget();
3574 Target->setLabelMustBeEmitted();
3576 if (Dests.
insert(Dest).second)
3585 DAG.getBasicBlock(Return)));
3588void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3589 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3592void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3594 "Call to landingpad not in landing pad!");
3598 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3614 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3619 if (
FuncInfo.ExceptionPointerVirtReg) {
3620 Ops[0] =
DAG.getZExtOrTrunc(
3621 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3628 Ops[1] =
DAG.getZExtOrTrunc(
3629 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3636 DAG.getVTList(ValueVTs),
Ops);
3644 if (JTB.first.HeaderBB ==
First)
3645 JTB.first.HeaderBB =
Last;
3658 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3660 bool Inserted =
Done.insert(BB).second;
3665 addSuccessorWithProb(IndirectBrMBB, Succ);
3675 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3676 DAG.getTarget().Options.NoTrapAfterNoreturn))
3682void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3685 Flags.copyFMF(*FPOp);
3693void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3696 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3697 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3700 Flags.setExact(ExactOp->isExact());
3702 Flags.setDisjoint(DisjointOp->isDisjoint());
3704 Flags.copyFMF(*FPOp);
3713void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3717 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3722 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3724 "Unexpected shift type");
3734 if (
const OverflowingBinaryOperator *OFBinOp =
3736 nuw = OFBinOp->hasNoUnsignedWrap();
3737 nsw = OFBinOp->hasNoSignedWrap();
3739 if (
const PossiblyExactOperator *ExactOp =
3741 exact = ExactOp->isExact();
3744 Flags.setExact(exact);
3745 Flags.setNoSignedWrap(nsw);
3746 Flags.setNoUnsignedWrap(nuw);
3752void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3763void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3769 auto &TLI =
DAG.getTargetLoweringInfo();
3782 Flags.setSameSign(
I.hasSameSign());
3783 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3785 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3790void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3797 if (FPMO->hasNoNaNs() ||
3798 (
DAG.isKnownNeverNaN(Op1) &&
DAG.isKnownNeverNaN(Op2)))
3802 Flags.copyFMF(*FPMO);
3803 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3805 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3815 return isa<SelectInst>(V);
3819void SelectionDAGBuilder::visitSelect(
const User &
I) {
3823 unsigned NumValues = ValueVTs.
size();
3824 if (NumValues == 0)
return;
3834 bool IsUnaryAbs =
false;
3835 bool Negate =
false;
3839 Flags.copyFMF(*FPOp);
3841 Flags.setUnpredictable(
3846 EVT VT = ValueVTs[0];
3847 LLVMContext &Ctx = *
DAG.getContext();
3848 auto &TLI =
DAG.getTargetLoweringInfo();
3858 bool UseScalarMinMax = VT.
isVector() &&
3867 switch (SPR.Flavor) {
3876 switch (SPR.NaNBehavior) {
3881 Flags.setNoSignedZeros(
true);
3895 switch (SPR.NaNBehavior) {
3900 Flags.setNoSignedZeros(
true);
3942 for (
unsigned i = 0; i != NumValues; ++i) {
3948 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3951 for (
unsigned i = 0; i != NumValues; ++i) {
3955 Values[i] =
DAG.getNode(
3962 DAG.getVTList(ValueVTs), Values));
3965void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3968 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3972 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3973 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3979void SelectionDAGBuilder::visitZExt(
const User &
I) {
3983 auto &TLI =
DAG.getTargetLoweringInfo();
3988 Flags.setNonNeg(PNI->hasNonNeg());
3993 if (
Flags.hasNonNeg() &&
4002void SelectionDAGBuilder::visitSExt(
const User &
I) {
4006 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4011void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
4017 Flags.copyFMF(*FPOp);
4018 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4021 DAG.getTargetConstant(
4026void SelectionDAGBuilder::visitFPExt(
const User &
I) {
4029 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4033 Flags.copyFMF(*FPOp);
4037void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
4040 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4045void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
4048 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4053void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4056 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4060 Flags.setNonNeg(PNI->hasNonNeg());
4065void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4068 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4073void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4076 const auto &TLI =
DAG.getTargetLoweringInfo();
4084void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4088 auto &TLI =
DAG.getTargetLoweringInfo();
4089 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4098void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4102 auto &TLI =
DAG.getTargetLoweringInfo();
4110void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4113 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4118 if (DestVT !=
N.getValueType())
4126 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4132void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4133 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4134 const Value *SV =
I.getOperand(0);
4139 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4141 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4147void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4148 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4155 InVec, InVal, InIdx));
4158void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4159 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4168void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4173 Mask = SVI->getShuffleMask();
4177 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4185 DAG.getVectorIdxConstant(0,
DL));
4196 unsigned MaskNumElts =
Mask.size();
4198 if (SrcNumElts == MaskNumElts) {
4204 if (SrcNumElts < MaskNumElts) {
4208 if (MaskNumElts % SrcNumElts == 0) {
4212 unsigned NumConcat = MaskNumElts / SrcNumElts;
4213 bool IsConcat =
true;
4214 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4215 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4221 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4222 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4223 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4228 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4235 for (
auto Src : ConcatSrcs) {
4248 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4249 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4265 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4266 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4268 if (Idx >= (
int)SrcNumElts)
4269 Idx -= SrcNumElts - PaddedMaskNumElts;
4277 if (MaskNumElts != PaddedMaskNumElts)
4279 DAG.getVectorIdxConstant(0,
DL));
4285 assert(SrcNumElts > MaskNumElts);
4289 int StartIdx[2] = {-1, -1};
4290 bool CanExtract =
true;
4291 for (
int Idx : Mask) {
4296 if (Idx >= (
int)SrcNumElts) {
4304 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4305 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4306 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4310 StartIdx[Input] = NewStartIdx;
4313 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4319 for (
unsigned Input = 0; Input < 2; ++Input) {
4320 SDValue &Src = Input == 0 ? Src1 : Src2;
4321 if (StartIdx[Input] < 0)
4322 Src =
DAG.getUNDEF(VT);
4325 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4330 SmallVector<int, 8> MappedOps(Mask);
4331 for (
int &Idx : MappedOps) {
4332 if (Idx >= (
int)SrcNumElts)
4333 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4338 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4347 for (
int Idx : Mask) {
4351 Res =
DAG.getUNDEF(EltVT);
4353 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4354 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4357 DAG.getVectorIdxConstant(Idx,
DL));
4367 ArrayRef<unsigned> Indices =
I.getIndices();
4368 const Value *Op0 =
I.getOperand(0);
4370 Type *AggTy =
I.getType();
4377 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4383 unsigned NumAggValues = AggValueVTs.
size();
4384 unsigned NumValValues = ValValueVTs.
size();
4388 if (!NumAggValues) {
4396 for (; i != LinearIndex; ++i)
4397 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4402 for (; i != LinearIndex + NumValValues; ++i)
4403 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4407 for (; i != NumAggValues; ++i)
4408 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4412 DAG.getVTList(AggValueVTs), Values));
4416 ArrayRef<unsigned> Indices =
I.getIndices();
4417 const Value *Op0 =
I.getOperand(0);
4419 Type *ValTy =
I.getType();
4424 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4428 unsigned NumValValues = ValValueVTs.
size();
4431 if (!NumValValues) {
4440 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4441 Values[i - LinearIndex] =
4447 DAG.getVTList(ValValueVTs), Values));
4450void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4451 Value *Op0 =
I.getOperand(0);
4457 auto &TLI =
DAG.getTargetLoweringInfo();
4462 bool IsVectorGEP =
I.getType()->isVectorTy();
4463 ElementCount VectorElementCount =
4469 const Value *Idx = GTI.getOperand();
4470 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4475 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4485 N =
DAG.getMemBasePlusOffset(
4486 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4492 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4494 TypeSize ElementSize =
4495 GTI.getSequentialElementStride(
DAG.getDataLayout());
4500 bool ElementScalable = ElementSize.
isScalable();
4506 C =
C->getSplatValue();
4509 if (CI && CI->isZero())
4511 if (CI && !ElementScalable) {
4512 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4515 if (
N.getValueType().isVector())
4516 OffsVal =
DAG.getConstant(
4519 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4526 Flags.setNoUnsignedWrap(
true);
4529 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4531 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4539 if (
N.getValueType().isVector()) {
4541 VectorElementCount);
4542 IdxN =
DAG.getSplat(VT, dl, IdxN);
4546 N =
DAG.getSplat(VT, dl,
N);
4552 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4554 SDNodeFlags ScaleFlags;
4563 if (ElementScalable) {
4564 EVT VScaleTy =
N.getValueType().getScalarType();
4567 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4568 if (
N.getValueType().isVector())
4569 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4570 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4575 if (ElementMul != 1) {
4576 if (ElementMul.isPowerOf2()) {
4577 unsigned Amt = ElementMul.logBase2();
4580 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4583 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4585 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4595 SDNodeFlags AddFlags;
4599 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4603 if (IsVectorGEP && !
N.getValueType().isVector()) {
4605 N =
DAG.getSplat(VT, dl,
N);
4616 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4621void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4628 Type *Ty =
I.getAllocatedType();
4629 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4630 auto &
DL =
DAG.getDataLayout();
4631 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4632 MaybeAlign Alignment =
I.getAlign();
4638 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4640 AllocSize =
DAG.getNode(
4642 DAG.getZExtOrTrunc(
DAG.getTypeSize(dl, MVT::i64, TySize), dl, IntPtr));
4647 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4648 if (*Alignment <= StackAlign)
4649 Alignment = std::nullopt;
4651 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4656 DAG.getConstant(StackAlignMask, dl, IntPtr),
4661 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4665 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4675 return I.getMetadata(LLVMContext::MD_range);
4680 if (std::optional<ConstantRange> CR = CB->getRange())
4684 return std::nullopt;
4689 return CB->getRetNoFPClass();
4693void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4695 return visitAtomicLoad(
I);
4697 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4698 const Value *SV =
I.getOperand(0);
4703 if (Arg->hasSwiftErrorAttr())
4704 return visitLoadFromSwiftError(
I);
4708 if (Alloca->isSwiftError())
4709 return visitLoadFromSwiftError(
I);
4715 Type *Ty =
I.getType();
4719 unsigned NumValues = ValueVTs.
size();
4723 Align Alignment =
I.getAlign();
4724 AAMDNodes AAInfo =
I.getAAMetadata();
4726 bool isVolatile =
I.isVolatile();
4731 bool ConstantMemory =
false;
4738 BatchAA->pointsToConstantMemory(MemoryLocation(
4743 Root =
DAG.getEntryNode();
4744 ConstantMemory =
true;
4748 Root =
DAG.getRoot();
4759 unsigned ChainI = 0;
4760 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4776 MachinePointerInfo PtrInfo =
4778 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4779 : MachinePointerInfo();
4781 SDValue A =
DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4782 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4783 MMOFlags, AAInfo, Ranges);
4784 Chains[ChainI] =
L.getValue(1);
4786 if (MemVTs[i] != ValueVTs[i])
4787 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4789 if (MDNode *NoFPClassMD =
I.getMetadata(LLVMContext::MD_nofpclass)) {
4790 uint64_t FPTestInt =
4792 cast<ConstantAsMetadata>(NoFPClassMD->getOperand(0))->getValue())
4794 if (FPTestInt != fcNone) {
4795 SDValue FPTestConst =
4796 DAG.getTargetConstant(FPTestInt, SDLoc(), MVT::i32);
4797 L = DAG.getNode(ISD::AssertNoFPClass, dl, L.getValueType(), L,
4804 if (!ConstantMemory) {
4810 PendingLoads.push_back(Chain);
4814 DAG.getVTList(ValueVTs), Values));
4817void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4818 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4819 "call visitStoreToSwiftError when backend supports swifterror");
4822 SmallVector<uint64_t, 4>
Offsets;
4823 const Value *SrcV =
I.getOperand(0);
4825 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4826 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4827 "expect a single EVT for swifterror");
4836 SDValue(Src.getNode(), Src.getResNo()));
4837 DAG.setRoot(CopyNode);
4840void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4841 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4842 "call visitLoadFromSwiftError when backend supports swifterror");
4845 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4846 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4847 "Support volatile, non temporal, invariant for load_from_swift_error");
4849 const Value *SV =
I.getOperand(0);
4850 Type *Ty =
I.getType();
4853 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4855 I.getAAMetadata()))) &&
4856 "load_from_swift_error should not be constant memory");
4859 SmallVector<uint64_t, 4>
Offsets;
4861 ValueVTs,
nullptr, &Offsets, 0);
4862 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4863 "expect a single EVT for swifterror");
4873void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4875 return visitAtomicStore(
I);
4877 const Value *SrcV =
I.getOperand(0);
4878 const Value *PtrV =
I.getOperand(1);
4880 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4885 if (Arg->hasSwiftErrorAttr())
4886 return visitStoreToSwiftError(
I);
4890 if (Alloca->isSwiftError())
4891 return visitStoreToSwiftError(
I);
4898 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4899 unsigned NumValues = ValueVTs.
size();
4912 Align Alignment =
I.getAlign();
4913 AAMDNodes AAInfo =
I.getAAMetadata();
4917 unsigned ChainI = 0;
4918 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4928 MachinePointerInfo PtrInfo =
4930 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4931 : MachinePointerInfo();
4935 if (MemVTs[i] != ValueVTs[i])
4936 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4938 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4939 Chains[ChainI] = St;
4945 DAG.setRoot(StoreNode);
4948void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4949 bool IsCompressing) {
4952 Value *Src0Operand =
I.getArgOperand(0);
4953 Value *PtrOperand =
I.getArgOperand(1);
4954 Value *MaskOperand =
I.getArgOperand(2);
4955 Align Alignment =
I.getParamAlign(1).valueOrOne();
4965 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4968 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4969 MachinePointerInfo(PtrOperand), MMOFlags,
4972 const auto &TLI =
DAG.getTargetLoweringInfo();
4975 !IsCompressing &&
TTI->hasConditionalLoadStoreForType(
4976 I.getArgOperand(0)->getType(),
true)
4982 DAG.setRoot(StoreNode);
5012 C =
C->getSplatValue();
5026 if (!
GEP ||
GEP->getParent() != CurBB)
5029 if (
GEP->getNumOperands() != 2)
5032 const Value *BasePtr =
GEP->getPointerOperand();
5033 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
5039 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
5044 if (ScaleVal != 1 &&
5056void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5060 const Value *Ptr =
I.getArgOperand(1);
5064 Align Alignment =
I.getParamAlign(1).valueOrOne();
5065 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5074 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5084 EVT IdxVT =
Index.getValueType();
5092 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5094 DAG.setRoot(Scatter);
5098void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5101 Value *PtrOperand =
I.getArgOperand(0);
5102 Value *MaskOperand =
I.getArgOperand(1);
5103 Value *Src0Operand =
I.getArgOperand(2);
5104 Align Alignment =
I.getParamAlign(0).valueOrOne();
5112 AAMDNodes AAInfo =
I.getAAMetadata();
5119 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5122 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5124 if (
I.hasMetadata(LLVMContext::MD_invariant_load))
5127 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5128 MachinePointerInfo(PtrOperand), MMOFlags,
5131 const auto &TLI =
DAG.getTargetLoweringInfo();
5138 TTI->hasConditionalLoadStoreForType(Src0Operand->
getType(),
5143 DAG.getMaskedLoad(VT, sdl, InChain, Ptr,
Offset, Mask, Src0, VT, MMO,
5150void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5154 const Value *Ptr =
I.getArgOperand(0);
5158 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5160 Align Alignment =
I.getParamAlign(0).valueOrOne();
5171 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5183 EVT IdxVT =
Index.getValueType();
5192 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5208 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5210 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5213 MachineFunction &MF =
DAG.getMachineFunction();
5215 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5216 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5220 dl, MemVT, VTs, InChain,
5228 DAG.setRoot(OutChain);
5231void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5234 switch (
I.getOperation()) {
5276 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5279 MachineFunction &MF =
DAG.getMachineFunction();
5281 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5282 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5285 DAG.getAtomic(NT, dl, MemVT, InChain,
5292 DAG.setRoot(OutChain);
5295void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5297 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5300 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5302 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5309void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5316 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5327 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5328 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5329 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5339 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5342 DAG.setRoot(OutChain);
5345void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5353 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5363 MachineFunction &MF =
DAG.getMachineFunction();
5365 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5366 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5370 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5377 DAG.setRoot(OutChain);
5385std::pair<bool, bool>
5386SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5388 bool HasChain = !
F->doesNotAccessMemory();
5390 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5392 return {HasChain, OnlyLoad};
5396 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5398 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5405 Ops.push_back(
DAG.getRoot());
5418 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5419 const Value *Arg =
I.getArgOperand(i);
5420 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5428 assert(CI->getBitWidth() <= 64 &&
5429 "large intrinsic immediates not handled");
5430 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5437 if (std::optional<OperandBundleUse> Bundle =
5439 auto *Sym = Bundle->Inputs[0].get();
5442 Ops.push_back(SDSym);
5445 if (std::optional<OperandBundleUse> Bundle =
5447 Value *Token = Bundle->Inputs[0].get();
5449 assert(
Ops.back().getValueType() != MVT::Glue &&
5450 "Did not expect another glue node here.");
5453 Ops.push_back(ConvControlToken);
5461 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5469 return DAG.getVTList(ValueVTs);
5473SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5496 if (
I.getType()->isVoidTy())
5511void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5513 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5517 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5520 TargetLowering::IntrinsicInfo *
Info = !Infos.
empty() ? &Infos[0] :
nullptr;
5523 getTargetIntrinsicOperands(
I, HasChain, OnlyLoad, Info);
5524 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5529 Flags.copyFMF(*FPMO);
5530 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5537 if (!Infos.
empty()) {
5540 MachineFunction &MF =
DAG.getMachineFunction();
5542 for (
const auto &Info : Infos) {
5545 MachinePointerInfo MPI;
5547 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5548 else if (
Info.fallbackAddressSpace)
5549 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5550 EVT MemVT =
Info.memVT;
5552 if (
Size.hasValue() && !
Size.getValue())
5554 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5556 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
5564 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5567 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5624 SDValue TwoToFractionalPartOfX;
5701 if (
Op.getValueType() == MVT::f32 &&
5725 if (
Op.getValueType() == MVT::f32 &&
5824 if (
Op.getValueType() == MVT::f32 &&
5908 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5921 if (
Op.getValueType() == MVT::f32 &&
5998 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
6009 if (
Op.getValueType() == MVT::f32 &&
6022 bool IsExp10 =
false;
6023 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
6027 IsExp10 = LHSC->isExactlyValue(Ten);
6054 unsigned Val = RHSC->getSExtValue();
6083 CurSquare, CurSquare);
6088 if (RHSC->getSExtValue() < 0)
6102 EVT VT =
LHS.getValueType();
6125 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6129 Opcode, VT, ScaleInt);
6164 switch (
N.getOpcode()) {
6168 Op.getValueType().getSizeInBits());
6193bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6200 MachineFunction &MF =
DAG.getMachineFunction();
6201 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6205 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6210 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6217 auto *NewDIExpr = FragExpr;
6224 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6227 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6228 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6232 if (Kind == FuncArgumentDbgValueKind::Value) {
6237 if (!IsInEntryBlock)
6253 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6254 !
DL->getInlinedAt();
6256 if (!IsInPrologue && !VariableIsFunctionInputArg)
6290 if (VariableIsFunctionInputArg) {
6292 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6293 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6294 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6295 return !NodeMap[
V].getNode();
6300 bool IsIndirect =
false;
6301 std::optional<MachineOperand>
Op;
6303 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6304 if (FI != std::numeric_limits<int>::max())
6308 if (!
Op &&
N.getNode()) {
6311 if (ArgRegsAndSizes.
size() == 1)
6312 Reg = ArgRegsAndSizes.
front().first;
6315 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6322 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6326 if (!
Op &&
N.getNode()) {
6330 if (FrameIndexSDNode *FINode =
6337 auto splitMultiRegDbgValue =
6350 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6353 if (
Offset >= ExprFragmentSizeInBits)
6357 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6358 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6363 Expr,
Offset, RegFragmentSizeInBits);
6367 if (!FragmentExpr) {
6368 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6370 DAG.AddDbgValue(SDV,
false);
6373 MachineInstr *NewMI = MakeVRegDbgValue(
6374 Reg, *FragmentExpr, Kind != FuncArgumentDbgValueKind::Value);
6375 FuncInfo.ArgDbgValues.push_back(NewMI);
6384 if (VMI !=
FuncInfo.ValueMap.end()) {
6385 const auto &TLI =
DAG.getTargetLoweringInfo();
6386 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6387 V->getType(), std::nullopt);
6388 if (RFV.occupiesMultipleRegs())
6389 return splitMultiRegDbgValue(RFV.getRegsAndSizes());
6392 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6393 }
else if (ArgRegsAndSizes.
size() > 1) {
6396 return splitMultiRegDbgValue(ArgRegsAndSizes);
6404 "Expected inlined-at fields to agree");
6405 MachineInstr *NewMI =
nullptr;
6408 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6410 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6414 FuncInfo.ArgDbgValues.push_back(NewMI);
6423 unsigned DbgSDNodeOrder) {
6435 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6436 false, dl, DbgSDNodeOrder);
6438 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6439 false, dl, DbgSDNodeOrder);
6444 case Intrinsic::smul_fix:
6446 case Intrinsic::umul_fix:
6448 case Intrinsic::smul_fix_sat:
6450 case Intrinsic::umul_fix_sat:
6452 case Intrinsic::sdiv_fix:
6454 case Intrinsic::udiv_fix:
6456 case Intrinsic::sdiv_fix_sat:
6458 case Intrinsic::udiv_fix_sat:
6471 "expected call_preallocated_setup Value");
6472 for (
const auto *U : PreallocatedSetup->
users()) {
6474 const Function *Fn = UseCall->getCalledFunction();
6475 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6485bool SelectionDAGBuilder::visitEntryValueDbgValue(
6495 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6496 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6498 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6499 "couldn't find an associated register for the Argument\n");
6502 Register ArgVReg = ArgIt->getSecond();
6504 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6505 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6506 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6507 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6508 DAG.AddDbgValue(SDV,
false );
6511 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6512 "couldn't find a physical register\n");
6517void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6520 switch (Intrinsic) {
6521 case Intrinsic::experimental_convergence_anchor:
6524 case Intrinsic::experimental_convergence_entry:
6527 case Intrinsic::experimental_convergence_loop: {
6529 auto *Token = Bundle->Inputs[0].get();
6537void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6538 unsigned IntrinsicID) {
6541 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6542 "Tried to lower unsupported histogram type");
6548 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6549 DataLayout TargetDL =
DAG.getDataLayout();
6551 Align Alignment =
DAG.getEVTAlign(VT);
6564 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6565 MachinePointerInfo(AS),
6576 EVT IdxVT =
Index.getValueType();
6587 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6590 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6594 DAG.setRoot(Histogram);
6597void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6599 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6600 "Tried lowering invalid vector extract last");
6602 const DataLayout &Layout =
DAG.getDataLayout();
6606 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6616 EVT BoolVT =
Mask.getValueType().getScalarType();
6618 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6625void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6627 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6634 Flags.copyFMF(*FPOp);
6636 switch (Intrinsic) {
6639 visitTargetIntrinsic(
I, Intrinsic);
6641 case Intrinsic::vscale: {
6646 case Intrinsic::vastart: visitVAStart(
I);
return;
6647 case Intrinsic::vaend: visitVAEnd(
I);
return;
6648 case Intrinsic::vacopy: visitVACopy(
I);
return;
6649 case Intrinsic::returnaddress:
6654 case Intrinsic::addressofreturnaddress:
6659 case Intrinsic::sponentry:
6664 case Intrinsic::frameaddress:
6669 case Intrinsic::read_volatile_register:
6670 case Intrinsic::read_register: {
6671 Value *
Reg =
I.getArgOperand(0);
6677 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6682 case Intrinsic::write_register: {
6683 Value *
Reg =
I.getArgOperand(0);
6684 Value *RegValue =
I.getArgOperand(1);
6692 case Intrinsic::memcpy:
6693 case Intrinsic::memcpy_inline: {
6699 "memcpy_inline needs constant size");
6701 Align DstAlign = MCI.getDestAlign().valueOrOne();
6702 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6703 Align Alignment = std::min(DstAlign, SrcAlign);
6704 bool isVol = MCI.isVolatile();
6708 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6709 MCI.isForceInlined(), &
I, std::nullopt,
6710 MachinePointerInfo(
I.getArgOperand(0)),
6711 MachinePointerInfo(
I.getArgOperand(1)),
6713 updateDAGForMaybeTailCall(MC);
6716 case Intrinsic::memset:
6717 case Intrinsic::memset_inline: {
6723 "memset_inline needs constant size");
6725 Align DstAlign = MSII.getDestAlign().valueOrOne();
6726 bool isVol = MSII.isVolatile();
6729 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6730 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6731 updateDAGForMaybeTailCall(MC);
6734 case Intrinsic::memmove: {
6740 Align DstAlign = MMI.getDestAlign().valueOrOne();
6741 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6742 Align Alignment = std::min(DstAlign, SrcAlign);
6743 bool isVol = MMI.isVolatile();
6747 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6749 MachinePointerInfo(
I.getArgOperand(0)),
6750 MachinePointerInfo(
I.getArgOperand(1)),
6752 updateDAGForMaybeTailCall(MM);
6755 case Intrinsic::memcpy_element_unordered_atomic: {
6761 Type *LengthTy =
MI.getLength()->getType();
6762 unsigned ElemSz =
MI.getElementSizeInBytes();
6766 isTC, MachinePointerInfo(
MI.getRawDest()),
6767 MachinePointerInfo(
MI.getRawSource()));
6768 updateDAGForMaybeTailCall(MC);
6771 case Intrinsic::memmove_element_unordered_atomic: {
6777 Type *LengthTy =
MI.getLength()->getType();
6778 unsigned ElemSz =
MI.getElementSizeInBytes();
6782 isTC, MachinePointerInfo(
MI.getRawDest()),
6783 MachinePointerInfo(
MI.getRawSource()));
6784 updateDAGForMaybeTailCall(MC);
6787 case Intrinsic::memset_element_unordered_atomic: {
6793 Type *LengthTy =
MI.getLength()->getType();
6794 unsigned ElemSz =
MI.getElementSizeInBytes();
6798 isTC, MachinePointerInfo(
MI.getRawDest()));
6799 updateDAGForMaybeTailCall(MC);
6802 case Intrinsic::call_preallocated_setup: {
6804 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6811 case Intrinsic::call_preallocated_arg: {
6813 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6827 case Intrinsic::eh_typeid_for: {
6830 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6831 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6836 case Intrinsic::eh_return_i32:
6837 case Intrinsic::eh_return_i64:
6838 DAG.getMachineFunction().setCallsEHReturn(
true);
6845 case Intrinsic::eh_unwind_init:
6846 DAG.getMachineFunction().setCallsUnwindInit(
true);
6848 case Intrinsic::eh_dwarf_cfa:
6853 case Intrinsic::eh_sjlj_callsite: {
6855 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6860 case Intrinsic::eh_sjlj_functioncontext: {
6862 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6865 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6869 case Intrinsic::eh_sjlj_setjmp: {
6874 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6876 DAG.setRoot(
Op.getValue(1));
6879 case Intrinsic::eh_sjlj_longjmp:
6883 case Intrinsic::eh_sjlj_setup_dispatch:
6887 case Intrinsic::masked_gather:
6888 visitMaskedGather(
I);
6890 case Intrinsic::masked_load:
6893 case Intrinsic::masked_scatter:
6894 visitMaskedScatter(
I);
6896 case Intrinsic::masked_store:
6897 visitMaskedStore(
I);
6899 case Intrinsic::masked_expandload:
6900 visitMaskedLoad(
I,
true );
6902 case Intrinsic::masked_compressstore:
6903 visitMaskedStore(
I,
true );
6905 case Intrinsic::powi:
6909 case Intrinsic::log:
6912 case Intrinsic::log2:
6916 case Intrinsic::log10:
6920 case Intrinsic::exp:
6923 case Intrinsic::exp2:
6927 case Intrinsic::pow:
6931 case Intrinsic::sqrt:
6932 case Intrinsic::fabs:
6933 case Intrinsic::sin:
6934 case Intrinsic::cos:
6935 case Intrinsic::tan:
6936 case Intrinsic::asin:
6937 case Intrinsic::acos:
6938 case Intrinsic::atan:
6939 case Intrinsic::sinh:
6940 case Intrinsic::cosh:
6941 case Intrinsic::tanh:
6942 case Intrinsic::exp10:
6943 case Intrinsic::floor:
6944 case Intrinsic::ceil:
6945 case Intrinsic::trunc:
6946 case Intrinsic::rint:
6947 case Intrinsic::nearbyint:
6948 case Intrinsic::round:
6949 case Intrinsic::roundeven:
6950 case Intrinsic::canonicalize: {
6953 switch (Intrinsic) {
6955 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6956 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6957 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6958 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6959 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6960 case Intrinsic::asin: Opcode =
ISD::FASIN;
break;
6961 case Intrinsic::acos: Opcode =
ISD::FACOS;
break;
6962 case Intrinsic::atan: Opcode =
ISD::FATAN;
break;
6963 case Intrinsic::sinh: Opcode =
ISD::FSINH;
break;
6964 case Intrinsic::cosh: Opcode =
ISD::FCOSH;
break;
6965 case Intrinsic::tanh: Opcode =
ISD::FTANH;
break;
6966 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6967 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6968 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6969 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6970 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6972 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6979 getValue(
I.getArgOperand(0)).getValueType(),
6983 case Intrinsic::atan2:
6985 getValue(
I.getArgOperand(0)).getValueType(),
6989 case Intrinsic::lround:
6990 case Intrinsic::llround:
6991 case Intrinsic::lrint:
6992 case Intrinsic::llrint: {
6995 switch (Intrinsic) {
6997 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6999 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
7000 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
7009 case Intrinsic::minnum:
7011 getValue(
I.getArgOperand(0)).getValueType(),
7015 case Intrinsic::maxnum:
7017 getValue(
I.getArgOperand(0)).getValueType(),
7021 case Intrinsic::minimum:
7023 getValue(
I.getArgOperand(0)).getValueType(),
7027 case Intrinsic::maximum:
7029 getValue(
I.getArgOperand(0)).getValueType(),
7033 case Intrinsic::minimumnum:
7035 getValue(
I.getArgOperand(0)).getValueType(),
7039 case Intrinsic::maximumnum:
7041 getValue(
I.getArgOperand(0)).getValueType(),
7045 case Intrinsic::copysign:
7047 getValue(
I.getArgOperand(0)).getValueType(),
7051 case Intrinsic::ldexp:
7053 getValue(
I.getArgOperand(0)).getValueType(),
7057 case Intrinsic::modf:
7058 case Intrinsic::sincos:
7059 case Intrinsic::sincospi:
7060 case Intrinsic::frexp: {
7062 switch (Intrinsic) {
7065 case Intrinsic::sincos:
7068 case Intrinsic::sincospi:
7071 case Intrinsic::modf:
7074 case Intrinsic::frexp:
7080 SDVTList VTs =
DAG.getVTList(ValueVTs);
7082 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7085 case Intrinsic::arithmetic_fence: {
7087 getValue(
I.getArgOperand(0)).getValueType(),
7091 case Intrinsic::fma:
7097#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7098 case Intrinsic::INTRINSIC:
7099#include "llvm/IR/ConstrainedOps.def"
7102#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7103#include "llvm/IR/VPIntrinsics.def"
7106 case Intrinsic::fptrunc_round: {
7110 std::optional<RoundingMode> RoundMode =
7118 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7123 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7128 case Intrinsic::fmuladd: {
7133 getValue(
I.getArgOperand(0)).getValueType(),
7140 getValue(
I.getArgOperand(0)).getValueType(),
7156 case Intrinsic::fptosi_sat: {
7163 case Intrinsic::fptoui_sat: {
7170 case Intrinsic::convert_from_arbitrary_fp: {
7175 const fltSemantics *SrcSem =
7178 DAG.getContext()->emitError(
7179 "convert_from_arbitrary_fp: not implemented format '" + FormatStr +
7190 DAG.getTargetConstant(
static_cast<int>(SemEnum), sdl, MVT::i32);
7195 case Intrinsic::set_rounding:
7201 case Intrinsic::is_fpclass: {
7202 const DataLayout DLayout =
DAG.getDataLayout();
7204 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7207 MachineFunction &MF =
DAG.getMachineFunction();
7211 Flags.setNoFPExcept(
7212 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7228 case Intrinsic::get_fpenv: {
7229 const DataLayout DLayout =
DAG.getDataLayout();
7231 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7246 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7249 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7250 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7256 case Intrinsic::set_fpenv: {
7257 const DataLayout DLayout =
DAG.getDataLayout();
7260 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7273 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7275 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7278 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7283 case Intrinsic::reset_fpenv:
7286 case Intrinsic::get_fpmode:
7295 case Intrinsic::set_fpmode:
7300 case Intrinsic::reset_fpmode: {
7305 case Intrinsic::pcmarker: {
7310 case Intrinsic::readcyclecounter: {
7313 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7318 case Intrinsic::readsteadycounter: {
7321 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7326 case Intrinsic::bitreverse:
7328 getValue(
I.getArgOperand(0)).getValueType(),
7331 case Intrinsic::bswap:
7333 getValue(
I.getArgOperand(0)).getValueType(),
7336 case Intrinsic::cttz: {
7344 case Intrinsic::ctlz: {
7352 case Intrinsic::ctpop: {
7358 case Intrinsic::fshl:
7359 case Intrinsic::fshr: {
7360 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7364 EVT VT =
X.getValueType();
7375 case Intrinsic::clmul: {
7381 case Intrinsic::sadd_sat: {
7387 case Intrinsic::uadd_sat: {
7393 case Intrinsic::ssub_sat: {
7399 case Intrinsic::usub_sat: {
7405 case Intrinsic::sshl_sat:
7406 case Intrinsic::ushl_sat: {
7410 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
7415 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
7418 "Unexpected shift type");
7427 case Intrinsic::smul_fix:
7428 case Intrinsic::umul_fix:
7429 case Intrinsic::smul_fix_sat:
7430 case Intrinsic::umul_fix_sat: {
7438 case Intrinsic::sdiv_fix:
7439 case Intrinsic::udiv_fix:
7440 case Intrinsic::sdiv_fix_sat:
7441 case Intrinsic::udiv_fix_sat: {
7446 Op1, Op2, Op3,
DAG, TLI));
7449 case Intrinsic::smax: {
7455 case Intrinsic::smin: {
7461 case Intrinsic::umax: {
7467 case Intrinsic::umin: {
7473 case Intrinsic::abs: {
7479 case Intrinsic::scmp: {
7486 case Intrinsic::ucmp: {
7493 case Intrinsic::stackaddress:
7494 case Intrinsic::stacksave: {
7499 Res =
DAG.getNode(SDOpcode, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7504 case Intrinsic::stackrestore:
7508 case Intrinsic::get_dynamic_area_offset: {
7517 case Intrinsic::stackguard: {
7518 MachineFunction &MF =
DAG.getMachineFunction();
7524 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7528 LLVMContext &Ctx = *
DAG.getContext();
7529 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7536 MachinePointerInfo(
Global, 0), Align,
7545 case Intrinsic::stackprotector: {
7547 MachineFunction &MF =
DAG.getMachineFunction();
7567 Chain, sdl, Src, FIN,
7574 case Intrinsic::objectsize:
7577 case Intrinsic::is_constant:
7580 case Intrinsic::annotation:
7581 case Intrinsic::ptr_annotation:
7582 case Intrinsic::launder_invariant_group:
7583 case Intrinsic::strip_invariant_group:
7588 case Intrinsic::type_test:
7589 case Intrinsic::public_type_test:
7591 "LowerTypeTests pass before code generation");
7594 case Intrinsic::assume:
7595 case Intrinsic::experimental_noalias_scope_decl:
7596 case Intrinsic::var_annotation:
7597 case Intrinsic::sideeffect:
7602 case Intrinsic::codeview_annotation: {
7604 MachineFunction &MF =
DAG.getMachineFunction();
7613 case Intrinsic::init_trampoline: {
7621 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7629 case Intrinsic::adjust_trampoline:
7634 case Intrinsic::gcroot: {
7635 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7636 "only valid in functions with gc specified, enforced by Verifier");
7638 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7645 case Intrinsic::gcread:
7646 case Intrinsic::gcwrite:
7648 case Intrinsic::get_rounding:
7654 case Intrinsic::expect:
7655 case Intrinsic::expect_with_probability:
7661 case Intrinsic::ubsantrap:
7662 case Intrinsic::debugtrap:
7663 case Intrinsic::trap: {
7664 StringRef TrapFuncName =
7665 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7666 if (TrapFuncName.
empty()) {
7667 switch (Intrinsic) {
7668 case Intrinsic::trap:
7671 case Intrinsic::debugtrap:
7674 case Intrinsic::ubsantrap:
7677 DAG.getTargetConstant(
7683 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7684 I.hasFnAttr(Attribute::NoMerge));
7688 if (Intrinsic == Intrinsic::ubsantrap) {
7689 Value *Arg =
I.getArgOperand(0);
7693 TargetLowering::CallLoweringInfo CLI(
DAG);
7694 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7696 DAG.getExternalSymbol(TrapFuncName.
data(),
7699 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7705 case Intrinsic::allow_runtime_check:
7706 case Intrinsic::allow_ubsan_check:
7710 case Intrinsic::uadd_with_overflow:
7711 case Intrinsic::sadd_with_overflow:
7712 case Intrinsic::usub_with_overflow:
7713 case Intrinsic::ssub_with_overflow:
7714 case Intrinsic::umul_with_overflow:
7715 case Intrinsic::smul_with_overflow: {
7717 switch (Intrinsic) {
7719 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7720 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7721 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7722 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7723 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7724 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7732 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7736 case Intrinsic::prefetch: {
7751 std::nullopt, Flags);
7757 DAG.setRoot(Result);
7760 case Intrinsic::lifetime_start:
7761 case Intrinsic::lifetime_end: {
7762 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7768 if (!LifetimeObject)
7773 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7774 if (SI ==
FuncInfo.StaticAllocaMap.end())
7778 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7782 case Intrinsic::pseudoprobe: {
7790 case Intrinsic::invariant_start:
7795 case Intrinsic::invariant_end:
7798 case Intrinsic::clear_cache: {
7803 {InputChain, StartVal, EndVal});
7808 case Intrinsic::donothing:
7809 case Intrinsic::seh_try_begin:
7810 case Intrinsic::seh_scope_begin:
7811 case Intrinsic::seh_try_end:
7812 case Intrinsic::seh_scope_end:
7815 case Intrinsic::experimental_stackmap:
7818 case Intrinsic::experimental_patchpoint_void:
7819 case Intrinsic::experimental_patchpoint:
7822 case Intrinsic::experimental_gc_statepoint:
7825 case Intrinsic::experimental_gc_result:
7828 case Intrinsic::experimental_gc_relocate:
7831 case Intrinsic::instrprof_cover:
7833 case Intrinsic::instrprof_increment:
7835 case Intrinsic::instrprof_timestamp:
7837 case Intrinsic::instrprof_value_profile:
7839 case Intrinsic::instrprof_mcdc_parameters:
7841 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7843 case Intrinsic::localescape: {
7844 MachineFunction &MF =
DAG.getMachineFunction();
7845 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7849 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7855 "can only escape static allocas");
7860 TII->get(TargetOpcode::LOCAL_ESCAPE))
7868 case Intrinsic::localrecover: {
7870 MachineFunction &MF =
DAG.getMachineFunction();
7876 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7880 Value *
FP =
I.getArgOperand(1);
7886 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7891 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7897 case Intrinsic::fake_use: {
7898 Value *
V =
I.getArgOperand(0);
7903 auto FakeUseValue = [&]() ->
SDValue {
7917 if (!FakeUseValue || FakeUseValue.isUndef())
7920 Ops[1] = FakeUseValue;
7929 case Intrinsic::reloc_none: {
7934 DAG.getTargetExternalSymbol(
7940 case Intrinsic::cond_loop: {
7950 case Intrinsic::eh_exceptionpointer:
7951 case Intrinsic::eh_exceptioncode: {
7957 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7958 if (Intrinsic == Intrinsic::eh_exceptioncode)
7959 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7963 case Intrinsic::xray_customevent: {
7966 const auto &Triple =
DAG.getTarget().getTargetTriple();
7975 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7977 Ops.push_back(LogEntryVal);
7978 Ops.push_back(StrSizeVal);
7979 Ops.push_back(Chain);
7985 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7988 DAG.setRoot(patchableNode);
7992 case Intrinsic::xray_typedevent: {
7995 const auto &Triple =
DAG.getTarget().getTargetTriple();
8007 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
8009 Ops.push_back(LogTypeId);
8010 Ops.push_back(LogEntryVal);
8011 Ops.push_back(StrSizeVal);
8012 Ops.push_back(Chain);
8018 MachineSDNode *MN =
DAG.getMachineNode(
8019 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
8021 DAG.setRoot(patchableNode);
8025 case Intrinsic::experimental_deoptimize:
8028 case Intrinsic::stepvector:
8031 case Intrinsic::vector_reduce_fadd:
8032 case Intrinsic::vector_reduce_fmul:
8033 case Intrinsic::vector_reduce_add:
8034 case Intrinsic::vector_reduce_mul:
8035 case Intrinsic::vector_reduce_and:
8036 case Intrinsic::vector_reduce_or:
8037 case Intrinsic::vector_reduce_xor:
8038 case Intrinsic::vector_reduce_smax:
8039 case Intrinsic::vector_reduce_smin:
8040 case Intrinsic::vector_reduce_umax:
8041 case Intrinsic::vector_reduce_umin:
8042 case Intrinsic::vector_reduce_fmax:
8043 case Intrinsic::vector_reduce_fmin:
8044 case Intrinsic::vector_reduce_fmaximum:
8045 case Intrinsic::vector_reduce_fminimum:
8046 visitVectorReduce(
I, Intrinsic);
8049 case Intrinsic::icall_branch_funnel: {
8055 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
8058 "llvm.icall.branch.funnel operand must be a GlobalValue");
8059 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
8061 struct BranchFunnelTarget {
8067 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
8070 if (ElemBase !=
Base)
8072 "to the same GlobalValue");
8078 "llvm.icall.branch.funnel operand must be a GlobalValue");
8084 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
8085 return T1.Offset < T2.Offset;
8088 for (
auto &
T : Targets) {
8089 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
8090 Ops.push_back(
T.Target);
8093 Ops.push_back(
DAG.getRoot());
8094 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
8103 case Intrinsic::wasm_landingpad_index:
8109 case Intrinsic::aarch64_settag:
8110 case Intrinsic::aarch64_settag_zero: {
8111 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
8112 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
8115 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8121 case Intrinsic::amdgcn_cs_chain: {
8126 Type *RetTy =
I.getType();
8136 for (
unsigned Idx : {2, 3, 1}) {
8137 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8139 Arg.setAttributes(&
I, Idx);
8140 Args.push_back(Arg);
8143 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8144 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8145 Args[2].IsInReg =
true;
8148 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8149 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8151 Arg.setAttributes(&
I, Idx);
8152 Args.push_back(Arg);
8155 TargetLowering::CallLoweringInfo CLI(
DAG);
8158 .setCallee(CC, RetTy, Callee, std::move(Args))
8161 .setConvergent(
I.isConvergent());
8163 std::pair<SDValue, SDValue>
Result =
8167 "Should've lowered as tail call");
8172 case Intrinsic::amdgcn_call_whole_wave: {
8174 bool isTailCall =
I.isTailCall();
8177 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8178 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8179 I.getArgOperand(Idx)->getType());
8180 Arg.setAttributes(&
I, Idx);
8187 Args.push_back(Arg);
8192 auto *Token = Bundle->Inputs[0].get();
8193 ConvControlToken =
getValue(Token);
8196 TargetLowering::CallLoweringInfo CLI(
DAG);
8200 getValue(
I.getArgOperand(0)), std::move(Args))
8204 .setConvergent(
I.isConvergent())
8205 .setConvergenceControlToken(ConvControlToken);
8208 std::pair<SDValue, SDValue>
Result =
8211 if (
Result.first.getNode())
8215 case Intrinsic::ptrmask: {
8231 auto HighOnes =
DAG.getNode(
8232 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8233 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8236 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8237 }
else if (
Mask.getValueType() != PtrVT)
8238 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8244 case Intrinsic::threadlocal_address: {
8248 case Intrinsic::get_active_lane_mask: {
8252 EVT ElementVT =
Index.getValueType();
8263 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8264 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8265 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8268 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8273 case Intrinsic::experimental_get_vector_length: {
8275 "Expected positive VF");
8280 EVT CountVT =
Count.getValueType();
8283 visitTargetIntrinsic(
I, Intrinsic);
8292 if (CountVT.
bitsLT(VT)) {
8297 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8307 case Intrinsic::vector_partial_reduce_add: {
8315 case Intrinsic::vector_partial_reduce_fadd: {
8323 case Intrinsic::experimental_cttz_elts: {
8326 EVT OpVT =
Op.getValueType();
8329 visitTargetIntrinsic(
I, Intrinsic);
8345 ConstantRange VScaleRange(1,
true);
8374 case Intrinsic::vector_insert: {
8382 if (
Index.getValueType() != VectorIdxTy)
8383 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8390 case Intrinsic::vector_extract: {
8398 if (
Index.getValueType() != VectorIdxTy)
8399 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8405 case Intrinsic::experimental_vector_match: {
8411 EVT ResVT =
Mask.getValueType();
8417 visitTargetIntrinsic(
I, Intrinsic);
8421 SDValue Ret =
DAG.getConstant(0, sdl, ResVT);
8423 for (
unsigned i = 0; i < SearchSize; ++i) {
8426 DAG.getVectorIdxConstant(i, sdl));
8429 Ret =
DAG.getNode(
ISD::OR, sdl, ResVT, Ret, Cmp);
8435 case Intrinsic::vector_reverse:
8436 visitVectorReverse(
I);
8438 case Intrinsic::vector_splice_left:
8439 case Intrinsic::vector_splice_right:
8440 visitVectorSplice(
I);
8442 case Intrinsic::callbr_landingpad:
8443 visitCallBrLandingPad(
I);
8445 case Intrinsic::vector_interleave2:
8446 visitVectorInterleave(
I, 2);
8448 case Intrinsic::vector_interleave3:
8449 visitVectorInterleave(
I, 3);
8451 case Intrinsic::vector_interleave4:
8452 visitVectorInterleave(
I, 4);
8454 case Intrinsic::vector_interleave5:
8455 visitVectorInterleave(
I, 5);
8457 case Intrinsic::vector_interleave6:
8458 visitVectorInterleave(
I, 6);
8460 case Intrinsic::vector_interleave7:
8461 visitVectorInterleave(
I, 7);
8463 case Intrinsic::vector_interleave8:
8464 visitVectorInterleave(
I, 8);
8466 case Intrinsic::vector_deinterleave2:
8467 visitVectorDeinterleave(
I, 2);
8469 case Intrinsic::vector_deinterleave3:
8470 visitVectorDeinterleave(
I, 3);
8472 case Intrinsic::vector_deinterleave4:
8473 visitVectorDeinterleave(
I, 4);
8475 case Intrinsic::vector_deinterleave5:
8476 visitVectorDeinterleave(
I, 5);
8478 case Intrinsic::vector_deinterleave6:
8479 visitVectorDeinterleave(
I, 6);
8481 case Intrinsic::vector_deinterleave7:
8482 visitVectorDeinterleave(
I, 7);
8484 case Intrinsic::vector_deinterleave8:
8485 visitVectorDeinterleave(
I, 8);
8487 case Intrinsic::experimental_vector_compress:
8489 getValue(
I.getArgOperand(0)).getValueType(),
8494 case Intrinsic::experimental_convergence_anchor:
8495 case Intrinsic::experimental_convergence_entry:
8496 case Intrinsic::experimental_convergence_loop:
8497 visitConvergenceControl(
I, Intrinsic);
8499 case Intrinsic::experimental_vector_histogram_add: {
8500 visitVectorHistogram(
I, Intrinsic);
8503 case Intrinsic::experimental_vector_extract_last_active: {
8504 visitVectorExtractLastActive(
I, Intrinsic);
8507 case Intrinsic::loop_dependence_war_mask:
8512 DAG.getConstant(0, sdl, MVT::i64)));
8514 case Intrinsic::loop_dependence_raw_mask:
8519 DAG.getConstant(0, sdl, MVT::i64)));
8524void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8540 PendingConstrainedFP.push_back(OutChain);
8543 PendingConstrainedFPStrict.push_back(OutChain);
8548void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8562 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8564 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8568 Flags.setNoFPExcept(
true);
8571 Flags.copyFMF(*FPOp);
8576#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8577 case Intrinsic::INTRINSIC: \
8578 Opcode = ISD::STRICT_##DAGN; \
8580#include "llvm/IR/ConstrainedOps.def"
8581 case Intrinsic::experimental_constrained_fmuladd: {
8588 pushFPOpOutChain(
Mul, EB);
8611 if (
DAG.isKnownNeverNaN(Opers[1]) &&
DAG.isKnownNeverNaN(Opers[2]))
8619 pushFPOpOutChain(Result, EB);
8626 std::optional<unsigned> ResOPC;
8628 case Intrinsic::vp_ctlz: {
8630 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8633 case Intrinsic::vp_cttz: {
8635 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8638 case Intrinsic::vp_cttz_elts: {
8640 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8643#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8644 case Intrinsic::VPID: \
8645 ResOPC = ISD::VPSD; \
8647#include "llvm/IR/VPIntrinsics.def"
8652 "Inconsistency: no SDNode available for this VPIntrinsic!");
8654 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8655 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8657 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8658 : ISD::VP_REDUCE_FMUL;
8664void SelectionDAGBuilder::visitVPLoad(
8676 Alignment =
DAG.getEVTAlign(VT);
8679 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8680 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8683 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8684 MachinePointerInfo(PtrOperand), MMOFlags,
8686 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8693void SelectionDAGBuilder::visitVPLoadFF(
8696 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8706 Alignment =
DAG.getEVTAlign(VT);
8709 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8710 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8713 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8718 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8721void SelectionDAGBuilder::visitVPGather(
8725 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8737 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8739 *Alignment, AAInfo, Ranges);
8749 EVT IdxVT =
Index.getValueType();
8755 LD =
DAG.getGatherVP(
8756 DAG.getVTList(VT, MVT::Other), VT,
DL,
8757 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8763void SelectionDAGBuilder::visitVPStore(
8767 EVT VT = OpValues[0].getValueType();
8772 Alignment =
DAG.getEVTAlign(VT);
8775 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8778 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8779 MachinePointerInfo(PtrOperand), MMOFlags,
8788void SelectionDAGBuilder::visitVPScatter(
8791 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8793 EVT VT = OpValues[0].getValueType();
8803 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8805 *Alignment, AAInfo);
8815 EVT IdxVT =
Index.getValueType();
8821 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8822 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8823 OpValues[2], OpValues[3]},
8829void SelectionDAGBuilder::visitVPStridedLoad(
8841 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8843 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8846 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8848 *Alignment, AAInfo, Ranges);
8850 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8851 OpValues[2], OpValues[3], MMO,
8859void SelectionDAGBuilder::visitVPStridedStore(
8863 EVT VT = OpValues[0].getValueType();
8869 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8872 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8874 *Alignment, AAInfo);
8878 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8886void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8887 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8902 "Unexpected target EVL type");
8905 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8907 if (
DAG.isKnownNeverNaN(Op1) &&
DAG.isKnownNeverNaN(Op2))
8910 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8913void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8921 return visitVPCmp(*CmpI);
8924 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8926 SDVTList VTs =
DAG.getVTList(ValueVTs);
8932 "Unexpected target EVL type");
8936 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8938 if (
I == EVLParamPos)
8945 SDNodeFlags SDFlags;
8953 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8955 case ISD::VP_LOAD_FF:
8956 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8958 case ISD::VP_GATHER:
8959 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8961 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8962 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8965 visitVPStore(VPIntrin, OpValues);
8967 case ISD::VP_SCATTER:
8968 visitVPScatter(VPIntrin, OpValues);
8970 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8971 visitVPStridedStore(VPIntrin, OpValues);
8973 case ISD::VP_FMULADD: {
8974 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8975 SDNodeFlags SDFlags;
8980 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8983 ISD::VP_FMUL,
DL, VTs,
8984 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8986 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8987 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8992 case ISD::VP_IS_FPCLASS: {
8993 const DataLayout DLayout =
DAG.getDataLayout();
8995 auto Constant = OpValues[1]->getAsZExtVal();
8998 {OpValues[0],
Check, OpValues[2], OpValues[3]});
9002 case ISD::VP_INTTOPTR: {
9013 case ISD::VP_PTRTOINT: {
9015 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9028 case ISD::VP_CTLZ_ZERO_UNDEF:
9030 case ISD::VP_CTTZ_ZERO_UNDEF:
9031 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
9032 case ISD::VP_CTTZ_ELTS: {
9034 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
9044 MachineFunction &MF =
DAG.getMachineFunction();
9052 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
9053 if (CallSiteIndex) {
9067 assert(BeginLabel &&
"BeginLabel should've been set");
9069 MachineFunction &MF =
DAG.getMachineFunction();
9081 assert(
II &&
"II should've been set");
9092std::pair<SDValue, SDValue>
9106 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
9109 "Non-null chain expected with non-tail call!");
9110 assert((Result.second.getNode() || !Result.first.getNode()) &&
9111 "Null value expected with tail call!");
9113 if (!Result.second.getNode()) {
9120 PendingExports.clear();
9122 DAG.setRoot(Result.second);
9140 if (!isMustTailCall &&
9141 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9147 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9148 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9157 bool isTailCall,
bool isMustTailCall,
9160 auto &
DL =
DAG.getDataLayout();
9167 const Value *SwiftErrorVal =
nullptr;
9174 const Value *V = *
I;
9177 if (V->getType()->isEmptyTy())
9182 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9194 Args.push_back(Entry);
9205 Value *V = Bundle->Inputs[0];
9207 Entry.IsCFGuardTarget =
true;
9208 Args.push_back(Entry);
9221 "Target doesn't support calls with kcfi operand bundles.");
9229 auto *Token = Bundle->Inputs[0].get();
9230 ConvControlToken =
getValue(Token);
9241 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9254 "This target doesn't support calls with ptrauth operand bundles.");
9258 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9260 if (Result.first.getNode()) {
9275 DAG.setRoot(CopyNode);
9291 LoadTy, Builder.DAG.getDataLayout()))
9292 return Builder.getValue(LoadCst);
9298 bool ConstantMemory =
false;
9301 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9302 Root = Builder.DAG.getEntryNode();
9303 ConstantMemory =
true;
9306 Root = Builder.DAG.getRoot();
9311 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9314 if (!ConstantMemory)
9315 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9321void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9324 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9335bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9336 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9337 const Value *
Size =
I.getArgOperand(2);
9340 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9346 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9350 if (Res.first.getNode()) {
9351 processIntegerCallValue(
I, Res.first,
true);
9365 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9366 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9388 switch (NumBitsToCompare) {
9400 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9413 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9414 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9418 processIntegerCallValue(
I, Cmp,
false);
9427bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9428 const Value *Src =
I.getArgOperand(0);
9429 const Value *
Char =
I.getArgOperand(1);
9430 const Value *
Length =
I.getArgOperand(2);
9432 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9433 std::pair<SDValue, SDValue> Res =
9436 MachinePointerInfo(Src));
9437 if (Res.first.getNode()) {
9451bool SelectionDAGBuilder::visitMemCCpyCall(
const CallInst &
I) {
9452 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9459 processIntegerCallValue(
I, Res.first,
true);
9471bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9476 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9477 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9479 Align Alignment = std::min(DstAlign, SrcAlign);
9488 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9489 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9490 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9492 "** memcpy should not be lowered as TailCall in mempcpy context **");
9496 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9509bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9510 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9512 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9515 MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), isStpcpy, &
I);
9516 if (Res.first.getNode()) {
9518 DAG.setRoot(Res.second);
9530bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9531 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9533 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9536 MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), &
I);
9537 if (Res.first.getNode()) {
9538 processIntegerCallValue(
I, Res.first,
true);
9551bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9552 const Value *Arg0 =
I.getArgOperand(0);
9554 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9557 if (Res.first.getNode()) {
9558 processIntegerCallValue(
I, Res.first,
false);
9571bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9572 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9574 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9575 std::pair<SDValue, SDValue> Res =
9578 MachinePointerInfo(Arg0));
9579 if (Res.first.getNode()) {
9580 processIntegerCallValue(
I, Res.first,
false);
9593bool SelectionDAGBuilder::visitStrstrCall(
const CallInst &
I) {
9594 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9595 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9599 processIntegerCallValue(
I, Res.first,
false);
9611bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9616 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9633bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9638 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9651void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9653 if (
I.isInlineAsm()) {
9660 if (Function *
F =
I.getCalledFunction()) {
9661 if (
F->isDeclaration()) {
9663 if (
unsigned IID =
F->getIntrinsicID()) {
9664 visitIntrinsicCall(
I, IID);
9675 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9676 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9680 if (visitMemCmpBCmpCall(
I))
9683 case LibFunc_copysign:
9684 case LibFunc_copysignf:
9685 case LibFunc_copysignl:
9688 if (
I.onlyReadsMemory()) {
9733 case LibFunc_atan2f:
9734 case LibFunc_atan2l:
9759 case LibFunc_sqrt_finite:
9760 case LibFunc_sqrtf_finite:
9761 case LibFunc_sqrtl_finite:
9778 case LibFunc_exp10f:
9779 case LibFunc_exp10l:
9784 case LibFunc_ldexpf:
9785 case LibFunc_ldexpl:
9789 case LibFunc_strstr:
9790 if (visitStrstrCall(
I))
9793 case LibFunc_memcmp:
9794 if (visitMemCmpBCmpCall(
I))
9797 case LibFunc_memccpy:
9798 if (visitMemCCpyCall(
I))
9801 case LibFunc_mempcpy:
9802 if (visitMemPCpyCall(
I))
9805 case LibFunc_memchr:
9806 if (visitMemChrCall(
I))
9809 case LibFunc_strcpy:
9810 if (visitStrCpyCall(
I,
false))
9813 case LibFunc_stpcpy:
9814 if (visitStrCpyCall(
I,
true))
9817 case LibFunc_strcmp:
9818 if (visitStrCmpCall(
I))
9821 case LibFunc_strlen:
9822 if (visitStrLenCall(
I))
9825 case LibFunc_strnlen:
9826 if (visitStrNLenCall(
I))
9850 if (
I.hasDeoptState())
9867 const Value *Discriminator = PAB->Inputs[1];
9869 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9870 assert(Discriminator->getType()->isIntegerTy(64) &&
9871 "Invalid ptrauth discriminator");
9876 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9877 DAG.getDataLayout()))
9917 for (
const auto &Code : Codes)
9932 SDISelAsmOperandInfo &MatchingOpInfo,
9934 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9940 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9942 OpInfo.ConstraintVT);
9943 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9945 MatchingOpInfo.ConstraintVT);
9946 const bool OutOpIsIntOrFP =
9947 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9948 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9949 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9950 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9953 " with a matching output constraint of"
9954 " incompatible type!");
9956 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9963 SDISelAsmOperandInfo &OpInfo,
9976 const Value *OpVal = OpInfo.CallOperandVal;
9994 DL.getPrefTypeAlign(Ty),
false,
9997 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
10000 OpInfo.CallOperand = StackSlot;
10013static std::optional<unsigned>
10015 SDISelAsmOperandInfo &OpInfo,
10016 SDISelAsmOperandInfo &RefOpInfo) {
10027 return std::nullopt;
10031 unsigned AssignedReg;
10034 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
10037 return std::nullopt;
10042 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
10044 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
10053 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
10058 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
10063 OpInfo.CallOperand =
10065 OpInfo.ConstraintVT = RegVT;
10069 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
10072 OpInfo.CallOperand =
10074 OpInfo.ConstraintVT = VT;
10081 if (OpInfo.isMatchingInputConstraint())
10082 return std::nullopt;
10084 EVT ValueVT = OpInfo.ConstraintVT;
10085 if (OpInfo.ConstraintVT == MVT::Other)
10089 unsigned NumRegs = 1;
10090 if (OpInfo.ConstraintVT != MVT::Other)
10105 I = std::find(
I, RC->
end(), AssignedReg);
10106 if (
I == RC->
end()) {
10109 return {AssignedReg};
10113 for (; NumRegs; --NumRegs, ++
I) {
10114 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10119 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10120 return std::nullopt;
10125 const std::vector<SDValue> &AsmNodeOperands) {
10128 for (; OperandNo; --OperandNo) {
10130 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10133 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10134 "Skipped past definitions?");
10135 CurOp +=
F.getNumOperandRegisters() + 1;
10143 unsigned Flags = 0;
10146 explicit ExtraFlags(
const CallBase &
Call) {
10148 if (
IA->hasSideEffects())
10150 if (
IA->isAlignStack())
10152 if (
IA->canThrow())
10159 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10175 unsigned get()
const {
return Flags; }
10198void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10205 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10207 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10211 bool HasSideEffect =
IA->hasSideEffects();
10212 ExtraFlags ExtraInfo(
Call);
10214 for (
auto &
T : TargetConstraints) {
10215 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10216 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10218 if (OpInfo.CallOperandVal)
10219 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10221 if (!HasSideEffect)
10222 HasSideEffect = OpInfo.hasMemory(TLI);
10234 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10235 "' expects an integer constant "
10238 ExtraInfo.update(
T);
10246 if (EmitEHLabels) {
10247 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10251 if (IsCallBr || EmitEHLabels) {
10259 if (EmitEHLabels) {
10260 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10265 IA->collectAsmStrs(AsmStrs);
10268 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10276 if (OpInfo.hasMatchingInput()) {
10277 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10308 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10311 OpInfo.isIndirect =
false;
10318 !OpInfo.isIndirect) {
10319 assert((OpInfo.isMultipleAlternative ||
10321 "Can only indirectify direct input operands!");
10327 OpInfo.CallOperandVal =
nullptr;
10330 OpInfo.isIndirect =
true;
10336 std::vector<SDValue> AsmNodeOperands;
10337 AsmNodeOperands.push_back(
SDValue());
10338 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10345 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10349 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10354 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10356 SDISelAsmOperandInfo &RefOpInfo =
10357 OpInfo.isMatchingInputConstraint()
10358 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10360 const auto RegError =
10363 const MachineFunction &MF =
DAG.getMachineFunction();
10365 const char *
RegName =
TRI.getName(*RegError);
10366 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10367 "' allocated for constraint '" +
10368 Twine(OpInfo.ConstraintCode) +
10369 "' does not match required type");
10373 auto DetectWriteToReservedRegister = [&]() {
10374 const MachineFunction &MF =
DAG.getMachineFunction();
10379 emitInlineAsmError(
Call,
"write to reserved register '" +
10388 !OpInfo.isMatchingInputConstraint())) &&
10389 "Only address as input operand is allowed.");
10391 switch (OpInfo.Type) {
10397 "Failed to convert memory constraint code to constraint id.");
10401 OpFlags.setMemConstraint(ConstraintID);
10402 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10404 AsmNodeOperands.push_back(OpInfo.CallOperand);
10409 if (OpInfo.AssignedRegs.
Regs.empty()) {
10410 emitInlineAsmError(
10411 Call,
"couldn't allocate output register for constraint '" +
10412 Twine(OpInfo.ConstraintCode) +
"'");
10416 if (DetectWriteToReservedRegister())
10430 SDValue InOperandVal = OpInfo.CallOperand;
10432 if (OpInfo.isMatchingInputConstraint()) {
10437 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10438 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10439 if (OpInfo.isIndirect) {
10441 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10442 "don't know how to handle tied "
10443 "indirect register inputs");
10448 MachineFunction &MF =
DAG.getMachineFunction();
10453 MVT RegVT =
R->getSimpleValueType(0);
10454 const TargetRegisterClass *RC =
10457 :
TRI.getMinimalPhysRegClass(TiedReg);
10458 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10461 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10465 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10467 OpInfo.getMatchedOperand(), dl,
DAG,
10472 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10473 assert(
Flag.getNumOperandRegisters() == 1 &&
10474 "Unexpected number of operands");
10477 Flag.clearMemConstraint();
10478 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10479 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10481 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10492 std::vector<SDValue>
Ops;
10498 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10499 Twine(OpInfo.ConstraintCode) +
"'");
10503 emitInlineAsmError(
Call,
10504 "invalid operand for inline asm constraint '" +
10505 Twine(OpInfo.ConstraintCode) +
"'");
10511 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10518 assert((OpInfo.isIndirect ||
10520 "Operand must be indirect to be a mem!");
10523 "Memory operands expect pointer values");
10528 "Failed to convert memory constraint code to constraint id.");
10532 ResOpType.setMemConstraint(ConstraintID);
10533 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10536 AsmNodeOperands.push_back(InOperandVal);
10544 "Failed to convert memory constraint code to constraint id.");
10548 SDValue AsmOp = InOperandVal;
10552 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10558 ResOpType.setMemConstraint(ConstraintID);
10560 AsmNodeOperands.push_back(
10563 AsmNodeOperands.push_back(AsmOp);
10569 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10570 Twine(OpInfo.ConstraintCode) +
"'");
10575 if (OpInfo.isIndirect) {
10576 emitInlineAsmError(
10577 Call,
"Don't know how to handle indirect register inputs yet "
10578 "for constraint '" +
10579 Twine(OpInfo.ConstraintCode) +
"'");
10584 if (OpInfo.AssignedRegs.
Regs.empty()) {
10585 emitInlineAsmError(
Call,
10586 "couldn't allocate input reg for constraint '" +
10587 Twine(OpInfo.ConstraintCode) +
"'");
10591 if (DetectWriteToReservedRegister())
10600 0, dl,
DAG, AsmNodeOperands);
10606 if (!OpInfo.AssignedRegs.
Regs.empty())
10616 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10620 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10632 ResultTypes = StructResult->elements();
10633 else if (!CallResultType->
isVoidTy())
10634 ResultTypes =
ArrayRef(CallResultType);
10636 auto CurResultType = ResultTypes.
begin();
10637 auto handleRegAssign = [&](
SDValue V) {
10638 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10639 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10640 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10652 if (ResultVT !=
V.getValueType() &&
10655 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10656 V.getValueType().isInteger()) {
10662 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10668 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10672 if (OpInfo.AssignedRegs.
Regs.empty())
10675 switch (OpInfo.ConstraintType) {
10679 Chain, &Glue, &
Call);
10691 assert(
false &&
"Unexpected unknown constraint");
10695 if (OpInfo.isIndirect) {
10696 const Value *Ptr = OpInfo.CallOperandVal;
10697 assert(Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10699 MachinePointerInfo(Ptr));
10706 handleRegAssign(V);
10708 handleRegAssign(Val);
10714 if (!ResultValues.
empty()) {
10715 assert(CurResultType == ResultTypes.
end() &&
10716 "Mismatch in number of ResultTypes");
10718 "Mismatch in number of output operands in asm result");
10721 DAG.getVTList(ResultVTs), ResultValues);
10726 if (!OutChains.
empty())
10729 if (EmitEHLabels) {
10734 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10736 DAG.setRoot(Chain);
10739void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10740 const Twine &Message) {
10741 LLVMContext &Ctx = *
DAG.getContext();
10745 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10749 if (ValueVTs.
empty())
10753 for (
const EVT &VT : ValueVTs)
10754 Ops.push_back(
DAG.getUNDEF(VT));
10759void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10763 DAG.getSrcValue(
I.getArgOperand(0))));
10766void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10767 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10768 const DataLayout &
DL =
DAG.getDataLayout();
10772 DL.getABITypeAlign(
I.getType()).value());
10773 DAG.setRoot(
V.getValue(1));
10775 if (
I.getType()->isPointerTy())
10776 V =
DAG.getPtrExtOrTrunc(
10781void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10785 DAG.getSrcValue(
I.getArgOperand(0))));
10788void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10793 DAG.getSrcValue(
I.getArgOperand(0)),
10794 DAG.getSrcValue(
I.getArgOperand(1))));
10800 std::optional<ConstantRange> CR =
getRange(
I);
10802 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10805 APInt Lo = CR->getUnsignedMin();
10806 if (!
Lo.isMinValue())
10809 APInt Hi = CR->getUnsignedMax();
10810 unsigned Bits = std::max(
Hi.getActiveBits(),
10818 DAG.getValueType(SmallVT));
10819 unsigned NumVals =
Op.getNode()->getNumValues();
10825 Ops.push_back(ZExt);
10826 for (
unsigned I = 1;
I != NumVals; ++
I)
10827 Ops.push_back(
Op.getValue(
I));
10829 return DAG.getMergeValues(
Ops,
SL);
10839 SDValue TestConst =
DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32);
10847 for (
unsigned I = 0, E =
Ops.size();
I != E; ++
I) {
10850 MergeOp, TestConst);
10853 return DAG.getMergeValues(
Ops,
SL);
10864 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10867 Args.reserve(NumArgs);
10871 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10872 ArgI != ArgE; ++ArgI) {
10873 const Value *V =
Call->getOperand(ArgI);
10875 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10878 Entry.setAttributes(
Call, ArgI);
10879 Args.push_back(Entry);
10884 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10913 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10922 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10928void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10954 Ops.push_back(Chain);
10955 Ops.push_back(InGlue);
10962 assert(
ID.getValueType() == MVT::i64);
10964 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10965 Ops.push_back(IDConst);
10971 Ops.push_back(ShadConst);
10977 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10981 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10986 DAG.setRoot(Chain);
10989 FuncInfo.MF->getFrameInfo().setHasStackMap();
10993void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
11010 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
11013 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
11014 SDLoc(SymbolicCallee),
11015 SymbolicCallee->getValueType(0));
11025 "Not enough arguments provided to the patchpoint intrinsic");
11028 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
11032 TargetLowering::CallLoweringInfo CLI(
DAG);
11037 SDNode *CallEnd =
Result.second.getNode();
11046 "Expected a callseq node.");
11048 bool HasGlue =
Call->getGluedNode();
11073 Ops.push_back(Callee);
11079 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
11080 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
11083 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
11088 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
11099 if (IsAnyRegCC && HasDef) {
11101 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11104 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
11109 NodeTys =
DAG.getVTList(ValueVTs);
11111 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11128 if (IsAnyRegCC && HasDef) {
11131 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11137 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11140void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11142 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11145 if (
I.arg_size() > 1)
11150 SDNodeFlags SDFlags;
11154 switch (Intrinsic) {
11155 case Intrinsic::vector_reduce_fadd:
11163 case Intrinsic::vector_reduce_fmul:
11171 case Intrinsic::vector_reduce_add:
11174 case Intrinsic::vector_reduce_mul:
11177 case Intrinsic::vector_reduce_and:
11180 case Intrinsic::vector_reduce_or:
11183 case Intrinsic::vector_reduce_xor:
11186 case Intrinsic::vector_reduce_smax:
11189 case Intrinsic::vector_reduce_smin:
11192 case Intrinsic::vector_reduce_umax:
11195 case Intrinsic::vector_reduce_umin:
11198 case Intrinsic::vector_reduce_fmax:
11201 case Intrinsic::vector_reduce_fmin:
11204 case Intrinsic::vector_reduce_fmaximum:
11207 case Intrinsic::vector_reduce_fminimum:
11221 Attrs.push_back(Attribute::SExt);
11223 Attrs.push_back(Attribute::ZExt);
11225 Attrs.push_back(Attribute::InReg);
11227 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11235std::pair<SDValue, SDValue>
11249 "Only supported for non-aggregate returns");
11252 for (
Type *Ty : RetOrigTys)
11261 RetOrigTys.
swap(OldRetOrigTys);
11262 RetVTs.
swap(OldRetVTs);
11263 Offsets.swap(OldOffsets);
11265 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11266 EVT RetVT = OldRetVTs[i];
11270 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11271 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11272 RetVTs.
append(NumRegs, RegisterVT);
11273 for (
unsigned j = 0; j != NumRegs; ++j)
11286 int DemoteStackIdx = -100;
11299 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11300 Entry.IsSRet =
true;
11301 Entry.Alignment = Alignment;
11313 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11315 if (NeedsRegBlock) {
11316 Flags.setInConsecutiveRegs();
11317 if (
I == RetVTs.
size() - 1)
11318 Flags.setInConsecutiveRegsLast();
11320 EVT VT = RetVTs[
I];
11324 for (
unsigned i = 0; i != NumRegs; ++i) {
11338 CLI.
Ins.push_back(Ret);
11347 if (Arg.IsSwiftError) {
11353 CLI.
Ins.push_back(Ret);
11361 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11365 Type *FinalType = Args[i].Ty;
11366 if (Args[i].IsByVal)
11367 FinalType = Args[i].IndirectType;
11370 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11373 Type *ArgTy = OrigArgTy;
11374 if (Args[i].Ty != Args[i].OrigTy) {
11375 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11376 ArgTy = Args[i].Ty;
11381 Args[i].Node.getResNo() +
Value);
11388 Flags.setOrigAlign(OriginalAlignment);
11393 Flags.setPointer();
11396 if (Args[i].IsZExt)
11398 if (Args[i].IsSExt)
11400 if (Args[i].IsNoExt)
11402 if (Args[i].IsInReg) {
11409 Flags.setHvaStart();
11415 if (Args[i].IsSRet)
11417 if (Args[i].IsSwiftSelf)
11418 Flags.setSwiftSelf();
11419 if (Args[i].IsSwiftAsync)
11420 Flags.setSwiftAsync();
11421 if (Args[i].IsSwiftError)
11422 Flags.setSwiftError();
11423 if (Args[i].IsCFGuardTarget)
11424 Flags.setCFGuardTarget();
11425 if (Args[i].IsByVal)
11427 if (Args[i].IsByRef)
11429 if (Args[i].IsPreallocated) {
11430 Flags.setPreallocated();
11438 if (Args[i].IsInAlloca) {
11439 Flags.setInAlloca();
11448 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11449 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11450 Flags.setByValSize(FrameSize);
11453 if (
auto MA = Args[i].Alignment)
11457 }
else if (
auto MA = Args[i].Alignment) {
11460 MemAlign = OriginalAlignment;
11462 Flags.setMemAlign(MemAlign);
11463 if (Args[i].IsNest)
11466 Flags.setInConsecutiveRegs();
11469 unsigned NumParts =
11474 if (Args[i].IsSExt)
11476 else if (Args[i].IsZExt)
11481 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11486 Args[i].Ty->getPointerAddressSpace())) &&
11487 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11500 CLI.
RetZExt == Args[i].IsZExt))
11501 Flags.setReturned();
11507 for (
unsigned j = 0; j != NumParts; ++j) {
11513 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11514 if (NumParts > 1 && j == 0)
11518 if (j == NumParts - 1)
11522 CLI.
Outs.push_back(MyFlags);
11523 CLI.
OutVals.push_back(Parts[j]);
11526 if (NeedsRegBlock &&
Value == NumValues - 1)
11527 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11539 "LowerCall didn't return a valid chain!");
11541 "LowerCall emitted a return value for a tail call!");
11543 "LowerCall didn't emit the correct number of values!");
11555 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11556 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11557 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11558 "LowerCall emitted a value with the wrong type!");
11568 unsigned NumValues = RetVTs.
size();
11569 ReturnValues.
resize(NumValues);
11576 for (
unsigned i = 0; i < NumValues; ++i) {
11583 DemoteStackIdx, Offsets[i]),
11585 ReturnValues[i] = L;
11586 Chains[i] = L.getValue(1);
11593 std::optional<ISD::NodeType> AssertOp;
11598 unsigned CurReg = 0;
11599 for (
EVT VT : RetVTs) {
11605 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11613 if (ReturnValues.
empty())
11619 return std::make_pair(Res, CLI.
Chain);
11636 if (
N->getNumValues() == 1) {
11644 "Lowering returned the wrong number of results!");
11647 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11661 "Copy from a reg to the same reg!");
11662 assert(!Reg.isPhysical() &&
"Is a physreg");
11668 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11673 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11674 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11675 ExtendType = PreferredExtendIt->second;
11678 PendingExports.push_back(Chain);
11690 return A->use_empty();
11692 const BasicBlock &Entry =
A->getParent()->front();
11693 for (
const User *U :
A->users())
11702 std::pair<const AllocaInst *, const StoreInst *>>;
11714 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11716 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11717 StaticAllocas.
reserve(NumArgs * 2);
11719 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11722 V = V->stripPointerCasts();
11724 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11727 return &Iter.first->second;
11744 if (
I.isDebugOrPseudoInst())
11748 for (
const Use &U :
I.operands()) {
11749 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11750 *Info = StaticAllocaInfo::Clobbered;
11756 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11757 *Info = StaticAllocaInfo::Clobbered;
11760 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11761 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11767 if (*Info != StaticAllocaInfo::Unknown)
11775 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11778 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11780 DL.getTypeStoreSize(Arg->
getType()) != *AllocaSize ||
11781 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11782 ArgCopyElisionCandidates.count(Arg)) {
11783 *Info = StaticAllocaInfo::Clobbered;
11787 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11791 *Info = StaticAllocaInfo::Elidable;
11792 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11797 if (ArgCopyElisionCandidates.size() == NumArgs)
11821 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11822 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11823 const AllocaInst *AI = ArgCopyIter->second.first;
11824 int FixedIndex = FINode->getIndex();
11826 int OldIndex = AllocaIndex;
11830 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11836 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11837 "greater than stack argument alignment ("
11838 <<
DebugStr(RequiredAlignment) <<
" vs "
11846 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11847 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11853 AllocaIndex = FixedIndex;
11854 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11855 for (
SDValue ArgVal : ArgVals)
11859 const StoreInst *
SI = ArgCopyIter->second.second;
11872void SelectionDAGISel::LowerArguments(
const Function &
F) {
11873 SelectionDAG &DAG =
SDB->DAG;
11874 SDLoc dl =
SDB->getCurSDLoc();
11879 if (
F.hasFnAttribute(Attribute::Naked))
11884 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11886 ISD::ArgFlagsTy
Flags;
11888 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11889 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11899 ArgCopyElisionCandidates);
11902 for (
const Argument &Arg :
F.args()) {
11903 unsigned ArgNo = Arg.getArgNo();
11906 bool isArgValueUsed = !Arg.
use_empty();
11908 if (Arg.hasAttribute(Attribute::ByVal))
11909 FinalType = Arg.getParamByValType();
11910 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11911 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11912 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11915 EVT VT =
TLI->getValueType(
DL, ArgTy);
11916 ISD::ArgFlagsTy
Flags;
11919 Flags.setPointer();
11922 if (Arg.hasAttribute(Attribute::ZExt))
11924 if (Arg.hasAttribute(Attribute::SExt))
11926 if (Arg.hasAttribute(Attribute::InReg)) {
11933 Flags.setHvaStart();
11939 if (Arg.hasAttribute(Attribute::StructRet))
11941 if (Arg.hasAttribute(Attribute::SwiftSelf))
11942 Flags.setSwiftSelf();
11943 if (Arg.hasAttribute(Attribute::SwiftAsync))
11944 Flags.setSwiftAsync();
11945 if (Arg.hasAttribute(Attribute::SwiftError))
11946 Flags.setSwiftError();
11947 if (Arg.hasAttribute(Attribute::ByVal))
11949 if (Arg.hasAttribute(Attribute::ByRef))
11951 if (Arg.hasAttribute(Attribute::InAlloca)) {
11952 Flags.setInAlloca();
11960 if (Arg.hasAttribute(Attribute::Preallocated)) {
11961 Flags.setPreallocated();
11973 const Align OriginalAlignment(
11974 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11975 Flags.setOrigAlign(OriginalAlignment);
11978 Type *ArgMemTy =
nullptr;
11979 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11982 ArgMemTy = Arg.getPointeeInMemoryValueType();
11984 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11989 if (
auto ParamAlign = Arg.getParamStackAlign())
11990 MemAlign = *ParamAlign;
11991 else if ((ParamAlign = Arg.getParamAlign()))
11992 MemAlign = *ParamAlign;
11994 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11995 if (
Flags.isByRef())
11996 Flags.setByRefSize(MemSize);
11998 Flags.setByValSize(MemSize);
11999 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
12000 MemAlign = *ParamAlign;
12002 MemAlign = OriginalAlignment;
12004 Flags.setMemAlign(MemAlign);
12006 if (Arg.hasAttribute(Attribute::Nest))
12009 Flags.setInConsecutiveRegs();
12010 if (ArgCopyElisionCandidates.count(&Arg))
12011 Flags.setCopyElisionCandidate();
12012 if (Arg.hasAttribute(Attribute::Returned))
12013 Flags.setReturned();
12015 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
12016 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12017 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
12018 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12019 for (
unsigned i = 0; i != NumRegs; ++i) {
12023 ISD::InputArg MyFlags(
12024 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
12026 if (NumRegs > 1 && i == 0)
12027 MyFlags.Flags.setSplit();
12030 MyFlags.Flags.setOrigAlign(
Align(1));
12031 if (i == NumRegs - 1)
12032 MyFlags.Flags.setSplitEnd();
12036 if (NeedsRegBlock &&
Value == NumValues - 1)
12037 Ins[Ins.
size() - 1].Flags.setInConsecutiveRegsLast();
12043 SDValue NewRoot =
TLI->LowerFormalArguments(
12044 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
12048 "LowerFormalArguments didn't return a valid chain!");
12050 "LowerFormalArguments didn't emit the correct number of values!");
12052 for (
unsigned i = 0, e = Ins.
size(); i != e; ++i) {
12054 "LowerFormalArguments emitted a null value!");
12056 "LowerFormalArguments emitted a value with the wrong type!");
12068 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
12069 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
12070 std::optional<ISD::NodeType> AssertOp;
12073 F.getCallingConv(), AssertOp);
12075 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
12076 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
12078 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
12079 FuncInfo->DemoteRegister = SRetReg;
12081 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
12089 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
12090 for (
const Argument &Arg :
F.args()) {
12094 unsigned NumValues = ValueVTs.
size();
12095 if (NumValues == 0)
12102 if (Ins[i].
Flags.isCopyElisionCandidate()) {
12103 unsigned NumParts = 0;
12104 for (EVT VT : ValueVTs)
12105 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
12106 F.getCallingConv(), VT);
12110 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12115 bool isSwiftErrorArg =
12116 TLI->supportSwiftError() &&
12117 Arg.hasAttribute(Attribute::SwiftError);
12118 if (!ArgHasUses && !isSwiftErrorArg) {
12119 SDB->setUnusedArgValue(&Arg, InVals[i]);
12122 if (FrameIndexSDNode *FI =
12124 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12127 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12128 EVT VT = ValueVTs[Val];
12129 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12130 F.getCallingConv(), VT);
12131 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12132 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12137 if (ArgHasUses || isSwiftErrorArg) {
12138 std::optional<ISD::NodeType> AssertOp;
12139 if (Arg.hasAttribute(Attribute::SExt))
12141 else if (Arg.hasAttribute(Attribute::ZExt))
12146 NewRoot,
F.getCallingConv(), AssertOp);
12149 if (NoFPClass !=
fcNone) {
12151 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12153 OutVal, SDNoFPClass);
12162 if (ArgValues.
empty())
12166 if (FrameIndexSDNode *FI =
12168 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12171 SDB->getCurSDLoc());
12173 SDB->setValue(&Arg, Res);
12183 if (LoadSDNode *LNode =
12185 if (FrameIndexSDNode *FI =
12187 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12215 FuncInfo->InitializeRegForValue(&Arg);
12216 SDB->CopyToExportRegsIfNeeded(&Arg);
12220 if (!Chains.
empty()) {
12227 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12231 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12232 for (MachineFunction::VariableDbgInfo &VI :
12233 MF->getInStackSlotVariableDbgInfo()) {
12234 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12235 if (
I != ArgCopyElisionFrameIndexMap.
end())
12236 VI.updateStackSlot(
I->second);
12251SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12252 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12254 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12260 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12264 if (!SuccsHandled.
insert(SuccMBB).second)
12272 for (
const PHINode &PN : SuccBB->phis()) {
12274 if (PN.use_empty())
12278 if (PN.getType()->isEmptyTy())
12282 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12287 RegOut =
FuncInfo.CreateRegs(&PN);
12305 "Didn't codegen value into a register!??");
12315 for (EVT VT : ValueVTs) {
12317 for (
unsigned i = 0; i != NumRegisters; ++i)
12319 Reg += NumRegisters;
12339void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12341 if (MaybeTC.
getNode() !=
nullptr)
12342 DAG.setRoot(MaybeTC);
12347void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12350 MachineFunction *CurMF =
FuncInfo.MF;
12351 MachineBasicBlock *NextMBB =
nullptr;
12356 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12358 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12360 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12368 CaseCluster &
Small = *
W.FirstCluster;
12369 CaseCluster &
Big = *
W.LastCluster;
12373 const APInt &SmallValue =
Small.Low->getValue();
12374 const APInt &BigValue =
Big.Low->getValue();
12377 APInt CommonBit = BigValue ^ SmallValue;
12384 DAG.getConstant(CommonBit,
DL, VT));
12386 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12392 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12394 addSuccessorWithProb(
12395 SwitchMBB, DefaultMBB,
12399 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12407 DAG.getBasicBlock(DefaultMBB));
12409 DAG.setRoot(BrCond);
12421 [](
const CaseCluster &a,
const CaseCluster &b) {
12422 return a.Prob != b.Prob ?
12424 a.Low->getValue().slt(b.Low->getValue());
12431 if (
I->Prob >
W.LastCluster->Prob)
12433 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12441 BranchProbability DefaultProb =
W.DefaultProb;
12442 BranchProbability UnhandledProbs = DefaultProb;
12444 UnhandledProbs +=
I->Prob;
12446 MachineBasicBlock *CurMBB =
W.MBB;
12448 bool FallthroughUnreachable =
false;
12449 MachineBasicBlock *Fallthrough;
12450 if (
I ==
W.LastCluster) {
12452 Fallthrough = DefaultMBB;
12457 CurMF->
insert(BBI, Fallthrough);
12461 UnhandledProbs -=
I->Prob;
12466 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12467 SwitchCG::JumpTable *JT = &
SL->JTCases[
I->JTCasesIndex].second;
12470 MachineBasicBlock *JumpMBB = JT->
MBB;
12471 CurMF->
insert(BBI, JumpMBB);
12473 auto JumpProb =
I->Prob;
12474 auto FallthroughProb = UnhandledProbs;
12482 if (*SI == DefaultMBB) {
12483 JumpProb += DefaultProb / 2;
12484 FallthroughProb -= DefaultProb / 2;
12502 if (FallthroughUnreachable) {
12509 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12510 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12519 if (CurMBB == SwitchMBB) {
12527 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12530 for (BitTestCase &BTC : BTB->
Cases)
12542 BTB->
Prob += DefaultProb / 2;
12546 if (FallthroughUnreachable)
12550 if (CurMBB == SwitchMBB) {
12557 const Value *
RHS, *
LHS, *MHS;
12559 if (
I->Low ==
I->High) {
12574 if (FallthroughUnreachable)
12578 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12581 if (CurMBB == SwitchMBB)
12584 SL->SwitchCases.push_back(CB);
12589 CurMBB = Fallthrough;
12593void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12594 const SwitchWorkListItem &W,
12597 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12598 "Clusters not sorted?");
12599 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12601 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12602 SL->computeSplitWorkItemInfo(W);
12607 assert(PivotCluster >
W.FirstCluster);
12608 assert(PivotCluster <=
W.LastCluster);
12613 const ConstantInt *Pivot = PivotCluster->Low;
12622 MachineBasicBlock *LeftMBB;
12623 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12624 FirstLeft->Low ==
W.GE &&
12625 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12626 LeftMBB = FirstLeft->MBB;
12628 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12629 FuncInfo.MF->insert(BBI, LeftMBB);
12631 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12639 MachineBasicBlock *RightMBB;
12640 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12641 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12642 RightMBB = FirstRight->MBB;
12644 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12645 FuncInfo.MF->insert(BBI, RightMBB);
12647 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12653 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12656 if (
W.MBB == SwitchMBB)
12659 SL->SwitchCases.push_back(CB);
12684 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12692 unsigned PeeledCaseIndex = 0;
12693 bool SwitchPeeled =
false;
12694 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12695 CaseCluster &CC = Clusters[
Index];
12696 if (CC.
Prob < TopCaseProb)
12698 TopCaseProb = CC.
Prob;
12699 PeeledCaseIndex =
Index;
12700 SwitchPeeled =
true;
12705 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12706 << TopCaseProb <<
"\n");
12711 MachineBasicBlock *PeeledSwitchMBB =
12713 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12716 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12717 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12718 nullptr,
nullptr, TopCaseProb.
getCompl()};
12719 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12721 Clusters.erase(PeeledCaseIt);
12722 for (CaseCluster &CC : Clusters) {
12724 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12725 << CC.
Prob <<
"\n");
12729 PeeledCaseProb = TopCaseProb;
12730 return PeeledSwitchMBB;
12733void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12735 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12737 Clusters.reserve(
SI.getNumCases());
12738 for (
auto I :
SI.cases()) {
12739 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12740 const ConstantInt *CaseVal =
I.getCaseValue();
12741 BranchProbability Prob =
12743 : BranchProbability(1,
SI.getNumCases() + 1);
12747 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12756 MachineBasicBlock *PeeledSwitchMBB =
12757 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12760 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12761 if (Clusters.empty()) {
12762 assert(PeeledSwitchMBB == SwitchMBB);
12764 if (DefaultMBB != NextBlock(SwitchMBB)) {
12771 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12773 SL->findBitTestClusters(Clusters, &SI);
12776 dbgs() <<
"Case clusters: ";
12777 for (
const CaseCluster &
C : Clusters) {
12783 C.Low->getValue().print(
dbgs(),
true);
12784 if (
C.Low !=
C.High) {
12786 C.High->getValue().print(
dbgs(),
true);
12793 assert(!Clusters.empty());
12797 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12801 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12804 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12806 while (!WorkList.
empty()) {
12808 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12813 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12817 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12821void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12822 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12828void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12829 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12834 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12843 SmallVector<int, 8>
Mask;
12845 for (
unsigned i = 0; i != NumElts; ++i)
12846 Mask.push_back(NumElts - 1 - i);
12851void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12860 EVT OutVT = ValueVTs[0];
12864 for (
unsigned i = 0; i != Factor; ++i) {
12865 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12867 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12873 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12875 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12883 DAG.getVTList(ValueVTs), SubVecs);
12887void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12890 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12895 for (
unsigned i = 0; i < Factor; ++i) {
12898 "Expected VTs to be the same");
12916 for (
unsigned i = 0; i < Factor; ++i)
12923void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12927 unsigned NumValues = ValueVTs.
size();
12928 if (NumValues == 0)
return;
12933 for (
unsigned i = 0; i != NumValues; ++i)
12938 DAG.getVTList(ValueVTs), Values));
12941void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12942 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12948 const bool IsLeft =
I.getIntrinsicID() == Intrinsic::vector_splice_left;
12963 uint64_t Idx = IsLeft ?
Imm : NumElts -
Imm;
12966 SmallVector<int, 8>
Mask;
12967 for (
unsigned i = 0; i < NumElts; ++i)
12968 Mask.push_back(Idx + i);
12996 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12997 "start of copy chain MUST be COPY");
12998 Reg =
MI->getOperand(1).getReg();
13001 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
13005 if (
MI->getOpcode() == TargetOpcode::COPY) {
13006 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
13007 Reg =
MI->getOperand(1).getReg();
13008 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
13011 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
13012 "end of copy chain MUST be INLINEASM_BR");
13022void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
13028 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
13029 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
13030 MachineRegisterInfo &MRI =
DAG.getMachineFunction().getRegInfo();
13038 for (
auto &
T : TargetConstraints) {
13039 SDISelAsmOperandInfo OpInfo(
T);
13047 switch (OpInfo.ConstraintType) {
13058 FuncInfo.MBB->addLiveIn(OriginalDef);
13066 ResultVTs.
push_back(OpInfo.ConstraintVT);
13075 ResultVTs.
push_back(OpInfo.ConstraintVT);
13083 DAG.getVTList(ResultVTs), ResultValues);
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const uint8_t *MatcherTable, size_t &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
uint16_t RegSizeInBits(const MCRegisterInfo &MRI, MCRegister RegNo)
static const fltSemantics & IEEEsingle()
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
static LLVM_ABI const fltSemantics * getArbitraryFPSemantics(StringRef Format)
Returns the fltSemantics for a given arbitrary FP format string, or nullptr if invalid.
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Conditional Branch instruction.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLowerinInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemccpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue C, SDValue Size, const CallInst *CI) const
Emit target-specific code that performs a memccpy, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrstr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, const CallInst *CI) const
Emit target-specific code that performs a strstr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo, const CallInst *CI) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy, const CallInst *CI) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Function * getSSPStackGuardCheck(const Module &M, const LibcallLoweringInfo &Libcalls) const
If the target has a standard stack protection check function that performs validation and error handl...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual Value * getSDagStackGuard(const Module &M, const LibcallLoweringInfo &Libcalls) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Unconditional Branch instruction.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CONVERGENCECTRL_ANCHOR
The llvm.experimental.convergence.* intrinsics.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ COND_LOOP
COND_LOOP is a conditional branch to self, used for implementing efficient conditional traps.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ STACKADDRESS
STACKADDRESS - Represents the llvm.stackaddress intrinsic.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element Operands: Mask.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ CLMUL
Carry-less multiplication operations.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ CONVERT_FROM_ARBITRARY_FP
CONVERT_FROM_ARBITRARY_FP - This operator converts from an arbitrary floating-point represented as an...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ CONVERGENCECTRL_GLUE
This does not correspond to any convergence control intrinsic.
@ SIGN_EXTEND
Conversion operators.
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ PATCHPOINT
The llvm.experimental.patchpoint.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ RELOC_NONE
Issue a no-op relocation against a given symbol at the current location.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ STACKMAP
The llvm.experimental.stackmap intrinsic.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ CLEAR_CACHE
llvm.clear_cache intrinsic Operands: Input Chain, Start Address, End Address Outputs: Output Chain
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
The llvm.loop.dependence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either an integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are known non-NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple type where the runtime length is machine dependent.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
Register Reg
The virtual register containing the index of the jump table entry to jump to.
MachineBasicBlock * Default
The MBB of the default bb, which is a successor of the range check MBB.
unsigned JTI
The JumpTableIndex for this jump table in the function.
MachineBasicBlock * MBB
The MBB into which to emit the code for the indirect jump.
std::optional< SDLoc > SL
The debug location of the instruction this JumpTable was produced from.
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)