79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
114#define DEBUG_TYPE "isel"
122 cl::desc(
"Insert the experimental `assertalign` node."),
127 cl::desc(
"Generate low-precision inline sequences "
128 "for some float libcalls"),
134 cl::desc(
"Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
155 const SDValue *Parts,
unsigned NumParts,
158 std::optional<CallingConv::ID> CC);
167 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
169 std::optional<CallingConv::ID> CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
174 PartVT, ValueVT, CC))
181 assert(NumParts > 0 &&
"No parts to assemble!");
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
199 if (RoundParts > 2) {
203 PartVT, HalfVT, V, InChain);
214 if (RoundParts < NumParts) {
216 unsigned OddParts = NumParts - RoundParts;
219 OddVT, V, InChain, CC);
235 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
246 !PartVT.
isVector() &&
"Unexpected split");
258 if (PartEVT == ValueVT)
262 ValueVT.
bitsLT(PartEVT)) {
275 if (ValueVT.
bitsLT(PartEVT)) {
280 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
295 llvm::Attribute::StrictFP)) {
297 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
309 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
310 ValueVT.
bitsLT(PartEVT)) {
319 const Twine &ErrMsg) {
322 return Ctx.emitError(ErrMsg);
325 if (CI->isInlineAsm()) {
327 *CI, ErrMsg +
", possible invalid constraint for vector type"));
330 return Ctx.emitError(
I, ErrMsg);
339 const SDValue *Parts,
unsigned NumParts,
342 std::optional<CallingConv::ID> CallConv) {
344 assert(NumParts > 0 &&
"No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
354 unsigned NumIntermediates;
359 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
364 NumIntermediates, RegisterVT);
367 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
369 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
372 "Part type sizes don't match!");
376 if (NumIntermediates == NumParts) {
379 for (
unsigned i = 0; i != NumParts; ++i)
381 V, InChain, CallConv);
382 }
else if (NumParts > 0) {
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (
unsigned i = 0; i != NumIntermediates; ++i)
390 IntermediateVT, V, InChain, CallConv);
405 DL, BuiltVectorTy,
Ops);
411 if (PartEVT == ValueVT)
427 "Cannot narrow, it would be a lossy transformation");
433 if (PartEVT == ValueVT)
458 }
else if (ValueVT.
bitsLT(PartEVT)) {
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
498 std::optional<CallingConv::ID> CallConv);
505 unsigned NumParts,
MVT PartVT,
const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
539 assert(NumParts == 1 &&
"Do not know what to promote to!");
550 "Unknown mismatch!");
552 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
558 assert(NumParts == 1 && PartEVT != ValueVT);
564 "Unknown mismatch!");
567 if (PartVT == MVT::x86mmx)
574 "Failed to tile the value with PartVT!");
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
588 if (NumParts & (NumParts - 1)) {
591 "Do not know what to expand to!");
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
603 std::reverse(Parts + RoundParts, Parts + NumParts);
605 NumParts = RoundParts;
617 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (
unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
622 SDValue &Part1 = Parts[i+StepSize/2];
629 if (ThisBits == PartBits && ThisVT != PartVT) {
637 std::reverse(Parts, Parts + OrigNumParts);
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
664 }
else if (PartEVT != ValueEVT) {
679 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
690 std::optional<CallingConv::ID> CallConv) {
694 const bool IsABIRegCopy = CallConv.has_value();
697 EVT PartEVT = PartVT;
698 if (PartEVT == ValueVT) {
744 "lossy conversion of vector to scalar type");
759 unsigned NumIntermediates;
763 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
768 NumIntermediates, RegisterVT);
771 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
773 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
776 "Mixing scalable and fixed vectors when copying in parts");
778 std::optional<ElementCount> DestEltCnt;
788 if (ValueVT == BuiltVectorTy) {
812 for (
unsigned i = 0; i != NumIntermediates; ++i) {
827 if (NumParts == NumIntermediates) {
830 for (
unsigned i = 0; i != NumParts; ++i)
832 }
else if (NumParts > 0) {
835 assert(NumIntermediates != 0 &&
"division by zero");
836 assert(NumParts % NumIntermediates == 0 &&
837 "Must expand into a divisible number of parts!");
838 unsigned Factor = NumParts / NumIntermediates;
839 for (
unsigned i = 0; i != NumIntermediates; ++i)
847 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
851 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
854 OS << LS << U.getTagName();
857 Twine(
"cannot lower ", Name)
863 EVT valuevt, std::optional<CallingConv::ID> CC)
869 std::optional<CallingConv::ID> CC) {
883 for (
unsigned i = 0; i != NumRegs; ++i)
884 Regs.push_back(Reg + i);
885 RegVTs.push_back(RegisterVT);
887 Reg = Reg.id() + NumRegs;
914 for (
unsigned i = 0; i != NumRegs; ++i) {
920 *Glue =
P.getValue(2);
923 Chain =
P.getValue(1);
951 EVT FromVT(MVT::Other);
955 }
else if (NumSignBits > 1) {
963 assert(FromVT != MVT::Other);
969 RegisterVT, ValueVT, V, Chain,
CallConv);
985 unsigned NumRegs =
Regs.size();
999 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1005 for (
unsigned i = 0; i != NumRegs; ++i) {
1017 if (NumRegs == 1 || Glue)
1028 Chain = Chains[NumRegs-1];
1034 unsigned MatchingIdx,
const SDLoc &dl,
1036 std::vector<SDValue> &
Ops)
const {
1041 Flag.setMatchingOp(MatchingIdx);
1042 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1050 Flag.setRegClass(RC->
getID());
1061 "No 1:1 mapping from clobbers to regs?");
1064 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1069 "If we clobbered the stack pointer, MFI should know about it.");
1078 for (
unsigned i = 0; i != NumRegs; ++i) {
1079 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1091 unsigned RegCount = std::get<0>(CountAndVT);
1092 MVT RegisterVT = std::get<1>(CountAndVT);
1110 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1112 *
DAG.getMachineFunction().getFunction().getParent());
1117 UnusedArgNodeMap.clear();
1119 PendingExports.clear();
1120 PendingConstrainedFP.clear();
1121 PendingConstrainedFPStrict.clear();
1129 DanglingDebugInfoMap.clear();
1136 if (Pending.
empty())
1142 unsigned i = 0, e = Pending.
size();
1143 for (; i != e; ++i) {
1145 if (Pending[i].
getNode()->getOperand(0) == Root)
1153 if (Pending.
size() == 1)
1180 if (!PendingConstrainedFPStrict.empty()) {
1181 assert(PendingConstrainedFP.empty());
1182 updateRoot(PendingConstrainedFPStrict);
1195 if (!PendingConstrainedFP.empty()) {
1196 assert(PendingConstrainedFPStrict.empty());
1197 updateRoot(PendingConstrainedFP);
1201 return DAG.getRoot();
1209 PendingConstrainedFP.size() +
1210 PendingConstrainedFPStrict.size());
1212 PendingConstrainedFP.end());
1213 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1214 PendingConstrainedFPStrict.end());
1215 PendingConstrainedFP.clear();
1216 PendingConstrainedFPStrict.clear();
1223 PendingExports.append(PendingConstrainedFPStrict.begin(),
1224 PendingConstrainedFPStrict.end());
1225 PendingConstrainedFPStrict.clear();
1226 return updateRoot(PendingExports);
1233 assert(Variable &&
"Missing variable");
1240 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1256 if (IsParameter && FINode) {
1258 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1259 true,
DL, SDNodeOrder);
1264 FuncArgumentDbgValueKind::Declare,
N);
1267 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1268 true,
DL, SDNodeOrder);
1270 DAG.AddDbgValue(SDV, IsParameter);
1275 FuncArgumentDbgValueKind::Declare,
N)) {
1277 <<
" (could not emit func-arg dbg_value)\n");
1288 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1290 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1292 if (It->Values.isKillLocation(It->Expr)) {
1298 It->Values.hasArgList())) {
1301 FnVarLocs->getDILocalVariable(It->VariableID),
1302 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1315 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1318 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1320 assert(DLR->getLabel() &&
"Missing label");
1322 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1323 DAG.AddDbgLabel(SDV);
1327 if (SkipDbgVariableRecords)
1335 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1337 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1346 if (Values.
empty()) {
1363 SDNodeOrder, IsVariadic)) {
1374 if (
I.isTerminator()) {
1375 HandlePHINodesInSuccessorBlocks(
I.getParent());
1382 bool NodeInserted =
false;
1383 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1384 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1385 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1386 if (PCSectionsMD || MMRA) {
1387 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1388 DAG, [&](
SDNode *) { NodeInserted =
true; });
1398 if (PCSectionsMD || MMRA) {
1399 auto It = NodeMap.find(&
I);
1400 if (It != NodeMap.end()) {
1402 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1404 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1405 }
else if (NodeInserted) {
1408 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1409 <<
I.getModule()->getName() <<
"]\n";
1418void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1428#define HANDLE_INST(NUM, OPCODE, CLASS) \
1429 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1430#include "llvm/IR/Instruction.def"
1442 for (
const Value *V : Values) {
1467 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1472 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1473 DIVariable *DanglingVariable = DDI.getVariable();
1475 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1477 << printDDI(
nullptr, DDI) <<
"\n");
1483 for (
auto &DDIMI : DanglingDebugInfoMap) {
1484 DanglingDebugInfoVector &DDIV = DDIMI.second;
1488 for (
auto &DDI : DDIV)
1489 if (isMatchingDbgValue(DDI))
1492 erase_if(DDIV, isMatchingDbgValue);
1500 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1501 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1504 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1505 for (
auto &DDI : DDIV) {
1508 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1511 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1512 "Expected inlined-at fields to agree");
1521 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1522 FuncArgumentDbgValueKind::Value, Val)) {
1524 << printDDI(V, DDI) <<
"\n");
1531 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1532 << ValSDNodeOrder <<
"\n");
1533 SDV = getDbgValue(Val, Variable, Expr,
DL,
1534 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1535 DAG.AddDbgValue(SDV,
false);
1539 <<
" in EmitFuncArgumentDbgValue\n");
1541 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1545 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1546 DAG.AddDbgValue(SDV,
false);
1553 DanglingDebugInfo &DDI) {
1558 const Value *OrigV = V;
1562 unsigned SDOrder = DDI.getSDNodeOrder();
1566 bool StackValue =
true;
1591 if (!AdditionalValues.
empty())
1601 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1602 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1610 assert(OrigV &&
"V shouldn't be null");
1612 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1613 DAG.AddDbgValue(SDV,
false);
1615 << printDDI(OrigV, DDI) <<
"\n");
1632 unsigned Order,
bool IsVariadic) {
1637 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1642 for (
const Value *V : Values) {
1652 if (CE->getOpcode() == Instruction::IntToPtr) {
1671 N = UnusedArgNodeMap[V];
1676 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1677 FuncArgumentDbgValueKind::Value,
N))
1704 bool IsParamOfFunc =
1712 auto VMI =
FuncInfo.ValueMap.find(V);
1713 if (VMI !=
FuncInfo.ValueMap.end()) {
1718 V->getType(), std::nullopt);
1724 unsigned BitsToDescribe = 0;
1726 BitsToDescribe = *VarSize;
1728 BitsToDescribe = Fragment->SizeInBits;
1731 if (
Offset >= BitsToDescribe)
1734 unsigned RegisterSize = RegAndSize.second;
1735 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1736 ? BitsToDescribe -
Offset
1739 Expr,
Offset, FragmentSize);
1743 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1744 DAG.AddDbgValue(SDV,
false);
1760 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1761 false, DbgLoc, Order, IsVariadic);
1762 DAG.AddDbgValue(SDV,
false);
1768 for (
auto &Pair : DanglingDebugInfoMap)
1769 for (
auto &DDI : Pair.second)
1780 if (It !=
FuncInfo.ValueMap.end()) {
1784 DAG.getDataLayout(), InReg, Ty,
1801 if (
N.getNode())
return N;
1861 return DAG.getSplatBuildVector(
1864 return DAG.getConstant(*CI,
DL, VT);
1873 getValue(CPA->getAddrDiscriminator()),
1874 getValue(CPA->getDiscriminator()));
1890 visit(CE->getOpcode(), *CE);
1892 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1898 for (
const Use &U :
C->operands()) {
1904 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1905 Constants.push_back(
SDValue(Val, i));
1914 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1918 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1927 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1929 "Unknown struct or array constant!");
1933 unsigned NumElts = ValueVTs.
size();
1937 for (
unsigned i = 0; i != NumElts; ++i) {
1938 EVT EltVT = ValueVTs[i];
1940 Constants[i] =
DAG.getUNDEF(EltVT);
1951 return DAG.getBlockAddress(BA, VT);
1954 return getValue(Equiv->getGlobalValue());
1959 if (VT == MVT::aarch64svcount) {
1960 assert(
C->isNullValue() &&
"Can only zero this target type!");
1966 assert(
C->isNullValue() &&
"Can only zero this target type!");
1983 for (
unsigned i = 0; i != NumElements; ++i)
2011 return DAG.getFrameIndex(
2019 std::optional<CallingConv::ID> CallConv;
2021 if (CB && !CB->isInlineAsm())
2022 CallConv = CB->getCallingConv();
2025 Inst->getType(), CallConv);
2039void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2052 if (IsMSVCCXX || IsCoreCLR)
2058 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2059 FuncInfo.MBB->addSuccessor(TargetMBB);
2066 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2075 DAG.getMachineFunction().setHasEHContTarget(
true);
2081 Value *ParentPad =
I.getCatchSwitchParentPad();
2084 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2087 assert(SuccessorColor &&
"No parent funclet for catchret!");
2088 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2089 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2094 DAG.getBasicBlock(SuccessorColorMBB));
2098void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2104 FuncInfo.MBB->setIsEHFuncletEntry();
2105 FuncInfo.MBB->setIsCleanupFuncletEntry();
2134 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2140 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2141 UnwindDests.back().first->setIsEHScopeEntry();
2144 UnwindDests.back().first->setIsEHFuncletEntry();
2148 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2149 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2151 if (IsMSVCCXX || IsCoreCLR)
2152 UnwindDests.back().first->setIsEHFuncletEntry();
2154 UnwindDests.back().first->setIsEHScopeEntry();
2156 NewEHPadBB = CatchSwitch->getUnwindDest();
2162 if (BPI && NewEHPadBB)
2164 EHPadBB = NewEHPadBB;
2171 auto UnwindDest =
I.getUnwindDest();
2172 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2173 BranchProbability UnwindDestProb =
2178 for (
auto &UnwindDest : UnwindDests) {
2179 UnwindDest.first->setIsEHPad();
2180 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2182 FuncInfo.MBB->normalizeSuccProbs();
2185 MachineBasicBlock *CleanupPadMBB =
2186 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2192void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2196void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2197 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2198 auto &
DL =
DAG.getDataLayout();
2210 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2227 SmallVector<uint64_t, 4>
Offsets;
2230 unsigned NumValues = ValueVTs.
size();
2233 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2234 for (
unsigned i = 0; i != NumValues; ++i) {
2241 if (MemVTs[i] != ValueVTs[i])
2243 Chains[i] =
DAG.getStore(
2251 MVT::Other, Chains);
2252 }
else if (
I.getNumOperands() != 0) {
2255 unsigned NumValues =
Types.size();
2259 const Function *
F =
I.getParent()->getParent();
2262 I.getOperand(0)->getType(),
F->getCallingConv(),
2266 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2268 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2271 LLVMContext &
Context =
F->getContext();
2272 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2274 for (
unsigned j = 0;
j != NumValues; ++
j) {
2287 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2290 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2294 if (
I.getOperand(0)->getType()->isPointerTy()) {
2296 Flags.setPointerAddrSpace(
2300 if (NeedsRegBlock) {
2301 Flags.setInConsecutiveRegs();
2302 if (j == NumValues - 1)
2303 Flags.setInConsecutiveRegsLast();
2311 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2314 for (
unsigned i = 0; i < NumParts; ++i) {
2317 VT, Types[j], 0, 0));
2327 const Function *
F =
I.getParent()->getParent();
2329 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2331 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2332 Flags.setSwiftError();
2344 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2346 DAG.getMachineFunction().getFunction().getCallingConv();
2347 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2352 "LowerReturn didn't return a valid chain!");
2363 if (V->getType()->isEmptyTy())
2367 if (VMI !=
FuncInfo.ValueMap.end()) {
2369 "Unused value assigned virtual registers!");
2382 if (
FuncInfo.isExportedInst(V))
return;
2394 if (VI->getParent() == FromBB)
2420 const BasicBlock *SrcBB = Src->getBasicBlock();
2421 const BasicBlock *DstBB = Dst->getBasicBlock();
2425 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2435 Src->addSuccessorWithoutProb(Dst);
2438 Prob = getEdgeProbability(Src, Dst);
2439 Src->addSuccessor(Dst, Prob);
2445 return I->getParent() == BB;
2469 if (CurBB == SwitchBB ||
2475 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2480 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2482 if (TM.Options.NoNaNsFPMath)
2486 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2488 SL->SwitchCases.push_back(CB);
2497 SL->SwitchCases.push_back(CB);
2505 unsigned Depth = 0) {
2514 if (Necessary !=
nullptr) {
2517 if (Necessary->contains(
I))
2536 if (
I.getNumSuccessors() != 2)
2539 if (!
I.isConditional())
2551 if (BPI !=
nullptr) {
2557 std::optional<bool> Likely;
2560 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2564 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2576 if (CostThresh <= 0)
2597 Value *BrCond =
I.getCondition();
2598 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2599 for (
const auto *U : Ins->users()) {
2602 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2615 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2617 for (
const auto &InsPair : RhsDeps) {
2618 if (!ShouldCountInsn(InsPair.first)) {
2619 ToDrop = InsPair.first;
2623 if (ToDrop ==
nullptr)
2625 RhsDeps.erase(ToDrop);
2628 for (
const auto &InsPair : RhsDeps) {
2633 CostOfIncluding +=
TTI->getInstructionCost(
2636 if (CostOfIncluding > CostThresh)
2662 const Value *BOpOp0, *BOpOp1;
2676 if (BOpc == Instruction::And)
2677 BOpc = Instruction::Or;
2678 else if (BOpc == Instruction::Or)
2679 BOpc = Instruction::And;
2685 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2690 TProb, FProb, InvertCond);
2700 if (
Opc == Instruction::Or) {
2721 auto NewTrueProb = TProb / 2;
2722 auto NewFalseProb = TProb / 2 + FProb;
2725 NewFalseProb, InvertCond);
2732 Probs[1], InvertCond);
2734 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2754 auto NewTrueProb = TProb + FProb / 2;
2755 auto NewFalseProb = FProb / 2;
2758 NewFalseProb, InvertCond);
2765 Probs[1], InvertCond);
2774 if (Cases.size() != 2)
return true;
2778 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2779 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2780 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2781 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2787 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2788 Cases[0].CC == Cases[1].CC &&
2791 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2793 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2800void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2806 if (
I.isUnconditional()) {
2812 if (Succ0MBB != NextBlock(BrMBB) ||
2825 const Value *CondVal =
I.getCondition();
2826 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2845 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2847 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2850 const Value *BOp0, *BOp1;
2853 Opcode = Instruction::And;
2855 Opcode = Instruction::Or;
2862 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2863 Opcode, BOp0, BOp1))) {
2865 getEdgeProbability(BrMBB, Succ0MBB),
2866 getEdgeProbability(BrMBB, Succ1MBB),
2871 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2875 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2882 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2888 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2889 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2891 SL->SwitchCases.clear();
2897 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2918 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2925 auto &TLI =
DAG.getTargetLoweringInfo();
2949 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2961 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2965 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2966 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2981 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2997 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3000 DAG.setRoot(BrCond);
3006 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3007 assert(JT.Reg &&
"Should lower JT Header first!");
3008 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3010 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
3012 Index.getValue(1), Table, Index);
3013 DAG.setRoot(BrJumpTable);
3021 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3022 const SDLoc &dl = *JT.SL;
3028 DAG.getConstant(JTH.
First, dl, VT));
3043 JT.Reg = JumpTableReg;
3051 Sub.getValueType()),
3055 MVT::Other, CopyTo, CMP,
3056 DAG.getBasicBlock(JT.Default));
3059 if (JT.MBB != NextBlock(SwitchBB))
3060 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3061 DAG.getBasicBlock(JT.MBB));
3063 DAG.setRoot(BrCond);
3066 if (JT.MBB != NextBlock(SwitchBB))
3068 DAG.getBasicBlock(JT.MBB)));
3070 DAG.setRoot(CopyTo);
3093 if (PtrTy != PtrMemTy)
3109 auto &
DL =
DAG.getDataLayout();
3118 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3125 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3138 assert(GuardCheckFn &&
"Guard check function is null");
3149 Entry.IsInReg =
true;
3150 Args.push_back(Entry);
3156 getValue(GuardCheckFn), std::move(Args));
3158 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3159 DAG.setRoot(Result.second);
3171 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3177 Guard =
DAG.getPOISON(PtrMemTy);
3220 auto &
DL =
DAG.getDataLayout();
3228 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3234 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3249 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3250 Entry.IsInReg =
true;
3251 Args.push_back(Entry);
3257 getValue(GuardCheckFn), std::move(Args));
3263 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3286 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3290 bool UsePtrType =
false;
3314 if (!
B.FallthroughUnreachable)
3315 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3316 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3320 if (!
B.FallthroughUnreachable) {
3329 DAG.getBasicBlock(
B.Default));
3333 if (
MBB != NextBlock(SwitchBB))
3351 if (PopCount == 1) {
3358 }
else if (PopCount == BB.
Range) {
3366 DAG.getConstant(1, dl, VT), ShiftOp);
3370 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3377 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3379 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3387 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3390 if (NextMBB != NextBlock(SwitchBB))
3391 BrAnd =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrAnd,
3392 DAG.getBasicBlock(NextMBB));
3397void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3415 const Value *Callee(
I.getCalledOperand());
3418 visitInlineAsm(
I, EHPadBB);
3423 case Intrinsic::donothing:
3425 case Intrinsic::seh_try_begin:
3426 case Intrinsic::seh_scope_begin:
3427 case Intrinsic::seh_try_end:
3428 case Intrinsic::seh_scope_end:
3434 case Intrinsic::experimental_patchpoint_void:
3435 case Intrinsic::experimental_patchpoint:
3436 visitPatchpoint(
I, EHPadBB);
3438 case Intrinsic::experimental_gc_statepoint:
3444 case Intrinsic::wasm_throw: {
3446 std::array<SDValue, 4>
Ops = {
3457 case Intrinsic::wasm_rethrow: {
3458 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3459 std::array<SDValue, 2>
Ops = {
3468 }
else if (
I.hasDeoptState()) {
3489 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3490 BranchProbability EHPadBBProb =
3496 addSuccessorWithProb(InvokeMBB, Return);
3497 for (
auto &UnwindDest : UnwindDests) {
3498 UnwindDest.first->setIsEHPad();
3499 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3505 DAG.getBasicBlock(Return)));
3514void SelectionDAGBuilder::visitCallBrIntrinsic(
const CallBrInst &
I) {
3515 TargetLowering::IntrinsicInfo
Info;
3516 assert(!
DAG.getTargetLoweringInfo().getTgtMemIntrinsic(
3517 Info,
I,
DAG.getMachineFunction(),
I.getIntrinsicID()) &&
3518 "Intrinsic touches memory");
3520 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
3523 getTargetIntrinsicOperands(
I, HasChain, OnlyLoad);
3524 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
3528 getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
3529 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
3534void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3535 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3537 if (
I.isInlineAsm()) {
3544 assert(!
I.hasOperandBundles() &&
3545 "Can't have operand bundles for intrinsics");
3546 visitCallBrIntrinsic(
I);
3551 SmallPtrSet<BasicBlock *, 8> Dests;
3552 Dests.
insert(
I.getDefaultDest());
3562 if (
I.isInlineAsm()) {
3563 for (BasicBlock *Dest :
I.getIndirectDests()) {
3565 Target->setIsInlineAsmBrIndirectTarget();
3571 Target->setLabelMustBeEmitted();
3573 if (Dests.
insert(Dest).second)
3582 DAG.getBasicBlock(Return)));
3585void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3586 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3589void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3591 "Call to landingpad not in landing pad!");
3595 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3611 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3616 if (
FuncInfo.ExceptionPointerVirtReg) {
3617 Ops[0] =
DAG.getZExtOrTrunc(
3618 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3625 Ops[1] =
DAG.getZExtOrTrunc(
3626 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3633 DAG.getVTList(ValueVTs),
Ops);
3641 if (JTB.first.HeaderBB ==
First)
3642 JTB.first.HeaderBB =
Last;
3655 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3657 bool Inserted =
Done.insert(BB).second;
3662 addSuccessorWithProb(IndirectBrMBB, Succ);
3672 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3673 DAG.getTarget().Options.NoTrapAfterNoreturn))
3679void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3682 Flags.copyFMF(*FPOp);
3690void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3693 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3694 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3697 Flags.setExact(ExactOp->isExact());
3699 Flags.setDisjoint(DisjointOp->isDisjoint());
3701 Flags.copyFMF(*FPOp);
3710void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3714 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3719 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3721 "Unexpected shift type");
3731 if (
const OverflowingBinaryOperator *OFBinOp =
3733 nuw = OFBinOp->hasNoUnsignedWrap();
3734 nsw = OFBinOp->hasNoSignedWrap();
3736 if (
const PossiblyExactOperator *ExactOp =
3738 exact = ExactOp->isExact();
3741 Flags.setExact(exact);
3742 Flags.setNoSignedWrap(nsw);
3743 Flags.setNoUnsignedWrap(nuw);
3749void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3760void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3766 auto &TLI =
DAG.getTargetLoweringInfo();
3779 Flags.setSameSign(
I.hasSameSign());
3780 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3782 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3787void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3794 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3798 Flags.copyFMF(*FPMO);
3799 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3801 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3810 return isa<SelectInst>(V);
3814void SelectionDAGBuilder::visitSelect(
const User &
I) {
3818 unsigned NumValues = ValueVTs.
size();
3819 if (NumValues == 0)
return;
3829 bool IsUnaryAbs =
false;
3830 bool Negate =
false;
3834 Flags.copyFMF(*FPOp);
3836 Flags.setUnpredictable(
3841 EVT VT = ValueVTs[0];
3842 LLVMContext &Ctx = *
DAG.getContext();
3843 auto &TLI =
DAG.getTargetLoweringInfo();
3853 bool UseScalarMinMax = VT.
isVector() &&
3862 switch (SPR.Flavor) {
3868 switch (SPR.NaNBehavior) {
3881 switch (SPR.NaNBehavior) {
3925 for (
unsigned i = 0; i != NumValues; ++i) {
3931 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3934 for (
unsigned i = 0; i != NumValues; ++i) {
3938 Values[i] =
DAG.getNode(
3945 DAG.getVTList(ValueVTs), Values));
3948void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3951 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3955 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3956 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3962void SelectionDAGBuilder::visitZExt(
const User &
I) {
3966 auto &TLI =
DAG.getTargetLoweringInfo();
3971 Flags.setNonNeg(PNI->hasNonNeg());
3976 if (
Flags.hasNonNeg() &&
3985void SelectionDAGBuilder::visitSExt(
const User &
I) {
3989 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3994void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
4000 Flags.copyFMF(*TruncInst);
4001 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4004 DAG.getTargetConstant(
4009void SelectionDAGBuilder::visitFPExt(
const User &
I) {
4012 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4016 Flags.copyFMF(*TruncInst);
4020void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
4023 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4028void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
4031 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4036void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4039 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4043 Flags.setNonNeg(PNI->hasNonNeg());
4048void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4051 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4056void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4059 const auto &TLI =
DAG.getTargetLoweringInfo();
4067void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4071 auto &TLI =
DAG.getTargetLoweringInfo();
4072 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4081void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4085 auto &TLI =
DAG.getTargetLoweringInfo();
4093void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4096 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4101 if (DestVT !=
N.getValueType())
4109 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4115void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4116 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4117 const Value *SV =
I.getOperand(0);
4122 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4124 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4130void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4131 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4138 InVec, InVal, InIdx));
4141void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4142 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4151void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4156 Mask = SVI->getShuffleMask();
4160 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4164 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4169 DAG.getVectorIdxConstant(0,
DL));
4180 unsigned MaskNumElts =
Mask.size();
4182 if (SrcNumElts == MaskNumElts) {
4188 if (SrcNumElts < MaskNumElts) {
4192 if (MaskNumElts % SrcNumElts == 0) {
4196 unsigned NumConcat = MaskNumElts / SrcNumElts;
4197 bool IsConcat =
true;
4198 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4199 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4205 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4206 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4207 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4212 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4219 for (
auto Src : ConcatSrcs) {
4232 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4233 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4249 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4250 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4252 if (Idx >= (
int)SrcNumElts)
4253 Idx -= SrcNumElts - PaddedMaskNumElts;
4261 if (MaskNumElts != PaddedMaskNumElts)
4263 DAG.getVectorIdxConstant(0,
DL));
4269 assert(SrcNumElts > MaskNumElts);
4273 int StartIdx[2] = {-1, -1};
4274 bool CanExtract =
true;
4275 for (
int Idx : Mask) {
4280 if (Idx >= (
int)SrcNumElts) {
4288 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4289 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4290 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4294 StartIdx[Input] = NewStartIdx;
4297 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4303 for (
unsigned Input = 0; Input < 2; ++Input) {
4304 SDValue &Src = Input == 0 ? Src1 : Src2;
4305 if (StartIdx[Input] < 0)
4306 Src =
DAG.getUNDEF(VT);
4309 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4314 SmallVector<int, 8> MappedOps(Mask);
4315 for (
int &Idx : MappedOps) {
4316 if (Idx >= (
int)SrcNumElts)
4317 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4322 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4331 for (
int Idx : Mask) {
4335 Res =
DAG.getUNDEF(EltVT);
4337 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4338 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4341 DAG.getVectorIdxConstant(Idx,
DL));
4351 ArrayRef<unsigned> Indices =
I.getIndices();
4352 const Value *Op0 =
I.getOperand(0);
4354 Type *AggTy =
I.getType();
4361 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4367 unsigned NumAggValues = AggValueVTs.
size();
4368 unsigned NumValValues = ValValueVTs.
size();
4372 if (!NumAggValues) {
4380 for (; i != LinearIndex; ++i)
4381 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4386 for (; i != LinearIndex + NumValValues; ++i)
4387 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4391 for (; i != NumAggValues; ++i)
4392 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4396 DAG.getVTList(AggValueVTs), Values));
4400 ArrayRef<unsigned> Indices =
I.getIndices();
4401 const Value *Op0 =
I.getOperand(0);
4403 Type *ValTy =
I.getType();
4408 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4412 unsigned NumValValues = ValValueVTs.
size();
4415 if (!NumValValues) {
4424 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4425 Values[i - LinearIndex] =
4431 DAG.getVTList(ValValueVTs), Values));
4434void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4435 Value *Op0 =
I.getOperand(0);
4441 auto &TLI =
DAG.getTargetLoweringInfo();
4446 bool IsVectorGEP =
I.getType()->isVectorTy();
4447 ElementCount VectorElementCount =
4453 const Value *Idx = GTI.getOperand();
4454 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4459 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4469 N =
DAG.getMemBasePlusOffset(
4470 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4476 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4478 TypeSize ElementSize =
4479 GTI.getSequentialElementStride(
DAG.getDataLayout());
4484 bool ElementScalable = ElementSize.
isScalable();
4490 C =
C->getSplatValue();
4493 if (CI && CI->isZero())
4495 if (CI && !ElementScalable) {
4496 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4499 if (
N.getValueType().isVector())
4500 OffsVal =
DAG.getConstant(
4503 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4510 Flags.setNoUnsignedWrap(
true);
4513 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4515 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4523 if (
N.getValueType().isVector()) {
4525 VectorElementCount);
4526 IdxN =
DAG.getSplat(VT, dl, IdxN);
4530 N =
DAG.getSplat(VT, dl,
N);
4536 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4538 SDNodeFlags ScaleFlags;
4547 if (ElementScalable) {
4548 EVT VScaleTy =
N.getValueType().getScalarType();
4551 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4552 if (
N.getValueType().isVector())
4553 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4554 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4559 if (ElementMul != 1) {
4560 if (ElementMul.isPowerOf2()) {
4561 unsigned Amt = ElementMul.logBase2();
4564 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4567 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4569 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4579 SDNodeFlags AddFlags;
4583 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4587 if (IsVectorGEP && !
N.getValueType().isVector()) {
4589 N =
DAG.getSplat(VT, dl,
N);
4600 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4605void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4612 Type *Ty =
I.getAllocatedType();
4613 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4614 auto &
DL =
DAG.getDataLayout();
4615 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4616 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4622 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4624 AllocSize =
DAG.getNode(
4626 DAG.getZExtOrTrunc(
DAG.getTypeSize(dl, MVT::i64, TySize), dl, IntPtr));
4631 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4632 if (*Alignment <= StackAlign)
4633 Alignment = std::nullopt;
4635 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4640 DAG.getConstant(StackAlignMask, dl, IntPtr),
4645 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4649 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4659 return I.getMetadata(LLVMContext::MD_range);
4664 if (std::optional<ConstantRange> CR = CB->getRange())
4668 return std::nullopt;
4673 return CB->getRetNoFPClass();
4677void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4679 return visitAtomicLoad(
I);
4681 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4682 const Value *SV =
I.getOperand(0);
4687 if (Arg->hasSwiftErrorAttr())
4688 return visitLoadFromSwiftError(
I);
4692 if (Alloca->isSwiftError())
4693 return visitLoadFromSwiftError(
I);
4699 Type *Ty =
I.getType();
4703 unsigned NumValues = ValueVTs.
size();
4707 Align Alignment =
I.getAlign();
4708 AAMDNodes AAInfo =
I.getAAMetadata();
4710 bool isVolatile =
I.isVolatile();
4715 bool ConstantMemory =
false;
4722 BatchAA->pointsToConstantMemory(MemoryLocation(
4727 Root =
DAG.getEntryNode();
4728 ConstantMemory =
true;
4732 Root =
DAG.getRoot();
4743 unsigned ChainI = 0;
4744 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4760 MachinePointerInfo PtrInfo =
4762 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4763 : MachinePointerInfo();
4765 SDValue A =
DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4766 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4767 MMOFlags, AAInfo, Ranges);
4768 Chains[ChainI] =
L.getValue(1);
4770 if (MemVTs[i] != ValueVTs[i])
4771 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4776 if (!ConstantMemory) {
4786 DAG.getVTList(ValueVTs), Values));
4789void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4790 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4791 "call visitStoreToSwiftError when backend supports swifterror");
4794 SmallVector<uint64_t, 4>
Offsets;
4795 const Value *SrcV =
I.getOperand(0);
4797 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4798 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4799 "expect a single EVT for swifterror");
4808 SDValue(Src.getNode(), Src.getResNo()));
4809 DAG.setRoot(CopyNode);
4812void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4813 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4814 "call visitLoadFromSwiftError when backend supports swifterror");
4817 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4818 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4819 "Support volatile, non temporal, invariant for load_from_swift_error");
4821 const Value *SV =
I.getOperand(0);
4822 Type *Ty =
I.getType();
4825 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4827 I.getAAMetadata()))) &&
4828 "load_from_swift_error should not be constant memory");
4831 SmallVector<uint64_t, 4>
Offsets;
4833 ValueVTs,
nullptr, &Offsets, 0);
4834 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4835 "expect a single EVT for swifterror");
4845void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4847 return visitAtomicStore(
I);
4849 const Value *SrcV =
I.getOperand(0);
4850 const Value *PtrV =
I.getOperand(1);
4852 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4857 if (Arg->hasSwiftErrorAttr())
4858 return visitStoreToSwiftError(
I);
4862 if (Alloca->isSwiftError())
4863 return visitStoreToSwiftError(
I);
4870 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4871 unsigned NumValues = ValueVTs.
size();
4884 Align Alignment =
I.getAlign();
4885 AAMDNodes AAInfo =
I.getAAMetadata();
4889 unsigned ChainI = 0;
4890 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4900 MachinePointerInfo PtrInfo =
4902 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4903 : MachinePointerInfo();
4907 if (MemVTs[i] != ValueVTs[i])
4908 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4910 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4911 Chains[ChainI] = St;
4917 DAG.setRoot(StoreNode);
4920void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4921 bool IsCompressing) {
4924 Value *Src0Operand =
I.getArgOperand(0);
4925 Value *PtrOperand =
I.getArgOperand(1);
4926 Value *MaskOperand =
I.getArgOperand(2);
4927 Align Alignment =
I.getParamAlign(1).valueOrOne();
4937 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4940 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4941 MachinePointerInfo(PtrOperand), MMOFlags,
4944 const auto &TLI =
DAG.getTargetLoweringInfo();
4947 !IsCompressing &&
TTI->hasConditionalLoadStoreForType(
4948 I.getArgOperand(0)->getType(),
true)
4954 DAG.setRoot(StoreNode);
4984 C =
C->getSplatValue();
4998 if (!
GEP ||
GEP->getParent() != CurBB)
5001 if (
GEP->getNumOperands() != 2)
5004 const Value *BasePtr =
GEP->getPointerOperand();
5005 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
5011 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
5016 if (ScaleVal != 1 &&
5028void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5032 const Value *Ptr =
I.getArgOperand(1);
5036 Align Alignment =
I.getParamAlign(1).valueOrOne();
5037 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5046 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5056 EVT IdxVT =
Index.getValueType();
5064 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5066 DAG.setRoot(Scatter);
5070void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5073 Value *PtrOperand =
I.getArgOperand(0);
5074 Value *MaskOperand =
I.getArgOperand(1);
5075 Value *Src0Operand =
I.getArgOperand(2);
5076 Align Alignment =
I.getParamAlign(0).valueOrOne();
5084 AAMDNodes AAInfo =
I.getAAMetadata();
5091 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5094 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5096 if (
I.hasMetadata(LLVMContext::MD_invariant_load))
5099 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5100 MachinePointerInfo(PtrOperand), MMOFlags,
5103 const auto &TLI =
DAG.getTargetLoweringInfo();
5110 TTI->hasConditionalLoadStoreForType(Src0Operand->
getType(),
5115 DAG.getMaskedLoad(VT, sdl, InChain, Ptr,
Offset, Mask, Src0, VT, MMO,
5122void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5126 const Value *Ptr =
I.getArgOperand(0);
5130 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5132 Align Alignment =
I.getParamAlign(0).valueOrOne();
5143 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5155 EVT IdxVT =
Index.getValueType();
5164 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5180 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5182 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5185 MachineFunction &MF =
DAG.getMachineFunction();
5187 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5188 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5192 dl, MemVT, VTs, InChain,
5200 DAG.setRoot(OutChain);
5203void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5206 switch (
I.getOperation()) {
5248 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5251 MachineFunction &MF =
DAG.getMachineFunction();
5253 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5254 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5257 DAG.getAtomic(NT, dl, MemVT, InChain,
5264 DAG.setRoot(OutChain);
5267void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5269 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5272 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5274 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5281void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5288 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5299 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5300 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5301 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5311 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5314 DAG.setRoot(OutChain);
5317void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5325 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5335 MachineFunction &MF =
DAG.getMachineFunction();
5337 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5338 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5342 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5349 DAG.setRoot(OutChain);
5357std::pair<bool, bool>
5358SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5360 bool HasChain = !
F->doesNotAccessMemory();
5362 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5364 return {HasChain, OnlyLoad};
5368 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5370 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5377 Ops.push_back(
DAG.getRoot());
5390 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5391 const Value *Arg =
I.getArgOperand(i);
5392 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5400 assert(CI->getBitWidth() <= 64 &&
5401 "large intrinsic immediates not handled");
5402 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5409 if (std::optional<OperandBundleUse> Bundle =
5411 auto *Sym = Bundle->Inputs[0].get();
5414 Ops.push_back(SDSym);
5417 if (std::optional<OperandBundleUse> Bundle =
5419 Value *Token = Bundle->Inputs[0].get();
5421 assert(
Ops.back().getValueType() != MVT::Glue &&
5422 "Did not expect another glue node here.");
5425 Ops.push_back(ConvControlToken);
5433 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5441 return DAG.getVTList(ValueVTs);
5445SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5468 if (
I.getType()->isVoidTy())
5483void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5485 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5488 TargetLowering::IntrinsicInfo
Info;
5489 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5490 bool IsTgtMemIntrinsic =
5494 I, HasChain, OnlyLoad, IsTgtMemIntrinsic ? &
Info :
nullptr);
5495 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5500 Flags.copyFMF(*FPMO);
5501 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5508 if (IsTgtMemIntrinsic) {
5513 MachinePointerInfo MPI;
5515 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5516 else if (
Info.fallbackAddressSpace)
5517 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5518 EVT MemVT =
Info.memVT;
5520 if (
Size.hasValue() && !
Size.getValue())
5522 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5523 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5524 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5529 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5532 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5589 SDValue TwoToFractionalPartOfX;
5666 if (
Op.getValueType() == MVT::f32 &&
5690 if (
Op.getValueType() == MVT::f32 &&
5789 if (
Op.getValueType() == MVT::f32 &&
5873 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5886 if (
Op.getValueType() == MVT::f32 &&
5963 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5974 if (
Op.getValueType() == MVT::f32 &&
5987 bool IsExp10 =
false;
5988 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5992 IsExp10 = LHSC->isExactlyValue(Ten);
6019 unsigned Val = RHSC->getSExtValue();
6048 CurSquare, CurSquare);
6053 if (RHSC->getSExtValue() < 0)
6067 EVT VT =
LHS.getValueType();
6090 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6094 Opcode, VT, ScaleInt);
6129 switch (
N.getOpcode()) {
6133 Op.getValueType().getSizeInBits());
6158bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6165 MachineFunction &MF =
DAG.getMachineFunction();
6166 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6170 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6175 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6182 auto *NewDIExpr = FragExpr;
6189 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6192 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6193 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6197 if (Kind == FuncArgumentDbgValueKind::Value) {
6202 if (!IsInEntryBlock)
6218 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6219 !
DL->getInlinedAt();
6221 if (!IsInPrologue && !VariableIsFunctionInputArg)
6255 if (VariableIsFunctionInputArg) {
6257 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6258 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6259 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6260 return !NodeMap[
V].getNode();
6265 bool IsIndirect =
false;
6266 std::optional<MachineOperand>
Op;
6268 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6269 if (FI != std::numeric_limits<int>::max())
6273 if (!
Op &&
N.getNode()) {
6276 if (ArgRegsAndSizes.
size() == 1)
6277 Reg = ArgRegsAndSizes.
front().first;
6280 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6287 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6291 if (!
Op &&
N.getNode()) {
6295 if (FrameIndexSDNode *FINode =
6305 for (
const auto &RegAndSize : SplitRegs) {
6309 int RegFragmentSizeInBits = RegAndSize.second;
6311 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6314 if (
Offset >= ExprFragmentSizeInBits)
6318 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6319 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6324 Expr,
Offset, RegFragmentSizeInBits);
6325 Offset += RegAndSize.second;
6328 if (!FragmentExpr) {
6329 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6331 DAG.AddDbgValue(SDV,
false);
6334 MachineInstr *NewMI =
6335 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6336 Kind != FuncArgumentDbgValueKind::Value);
6337 FuncInfo.ArgDbgValues.push_back(NewMI);
6344 if (VMI !=
FuncInfo.ValueMap.end()) {
6345 const auto &TLI =
DAG.getTargetLoweringInfo();
6346 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6347 V->getType(), std::nullopt);
6348 if (RFV.occupiesMultipleRegs()) {
6349 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6354 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6355 }
else if (ArgRegsAndSizes.
size() > 1) {
6358 splitMultiRegDbgValue(ArgRegsAndSizes);
6367 "Expected inlined-at fields to agree");
6368 MachineInstr *NewMI =
nullptr;
6371 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6373 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6377 FuncInfo.ArgDbgValues.push_back(NewMI);
6386 unsigned DbgSDNodeOrder) {
6398 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6399 false, dl, DbgSDNodeOrder);
6401 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6402 false, dl, DbgSDNodeOrder);
6407 case Intrinsic::smul_fix:
6409 case Intrinsic::umul_fix:
6411 case Intrinsic::smul_fix_sat:
6413 case Intrinsic::umul_fix_sat:
6415 case Intrinsic::sdiv_fix:
6417 case Intrinsic::udiv_fix:
6419 case Intrinsic::sdiv_fix_sat:
6421 case Intrinsic::udiv_fix_sat:
6434 "expected call_preallocated_setup Value");
6435 for (
const auto *U : PreallocatedSetup->
users()) {
6437 const Function *Fn = UseCall->getCalledFunction();
6438 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6448bool SelectionDAGBuilder::visitEntryValueDbgValue(
6458 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6459 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6461 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6462 "couldn't find an associated register for the Argument\n");
6465 Register ArgVReg = ArgIt->getSecond();
6467 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6468 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6469 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6470 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6471 DAG.AddDbgValue(SDV,
false );
6474 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6475 "couldn't find a physical register\n");
6480void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6483 switch (Intrinsic) {
6484 case Intrinsic::experimental_convergence_anchor:
6487 case Intrinsic::experimental_convergence_entry:
6490 case Intrinsic::experimental_convergence_loop: {
6492 auto *Token = Bundle->Inputs[0].get();
6500void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6501 unsigned IntrinsicID) {
6504 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6505 "Tried to lower unsupported histogram type");
6511 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6512 DataLayout TargetDL =
DAG.getDataLayout();
6514 Align Alignment =
DAG.getEVTAlign(VT);
6527 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6528 MachinePointerInfo(AS),
6539 EVT IdxVT =
Index.getValueType();
6546 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6549 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6553 DAG.setRoot(Histogram);
6556void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6558 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6559 "Tried lowering invalid vector extract last");
6561 const DataLayout &Layout =
DAG.getDataLayout();
6565 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6575 EVT BoolVT =
Mask.getValueType().getScalarType();
6577 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6584void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6586 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6593 Flags.copyFMF(*FPOp);
6595 switch (Intrinsic) {
6598 visitTargetIntrinsic(
I, Intrinsic);
6600 case Intrinsic::vscale: {
6605 case Intrinsic::vastart: visitVAStart(
I);
return;
6606 case Intrinsic::vaend: visitVAEnd(
I);
return;
6607 case Intrinsic::vacopy: visitVACopy(
I);
return;
6608 case Intrinsic::returnaddress:
6613 case Intrinsic::addressofreturnaddress:
6618 case Intrinsic::sponentry:
6623 case Intrinsic::frameaddress:
6628 case Intrinsic::read_volatile_register:
6629 case Intrinsic::read_register: {
6630 Value *
Reg =
I.getArgOperand(0);
6636 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6641 case Intrinsic::write_register: {
6642 Value *
Reg =
I.getArgOperand(0);
6643 Value *RegValue =
I.getArgOperand(1);
6651 case Intrinsic::memcpy:
6652 case Intrinsic::memcpy_inline: {
6658 "memcpy_inline needs constant size");
6660 Align DstAlign = MCI.getDestAlign().valueOrOne();
6661 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6662 Align Alignment = std::min(DstAlign, SrcAlign);
6663 bool isVol = MCI.isVolatile();
6667 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6668 MCI.isForceInlined(), &
I, std::nullopt,
6669 MachinePointerInfo(
I.getArgOperand(0)),
6670 MachinePointerInfo(
I.getArgOperand(1)),
6672 updateDAGForMaybeTailCall(MC);
6675 case Intrinsic::memset:
6676 case Intrinsic::memset_inline: {
6682 "memset_inline needs constant size");
6684 Align DstAlign = MSII.getDestAlign().valueOrOne();
6685 bool isVol = MSII.isVolatile();
6688 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6689 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6690 updateDAGForMaybeTailCall(MC);
6693 case Intrinsic::memmove: {
6699 Align DstAlign = MMI.getDestAlign().valueOrOne();
6700 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6701 Align Alignment = std::min(DstAlign, SrcAlign);
6702 bool isVol = MMI.isVolatile();
6706 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6708 MachinePointerInfo(
I.getArgOperand(0)),
6709 MachinePointerInfo(
I.getArgOperand(1)),
6711 updateDAGForMaybeTailCall(MM);
6714 case Intrinsic::memcpy_element_unordered_atomic: {
6720 Type *LengthTy =
MI.getLength()->getType();
6721 unsigned ElemSz =
MI.getElementSizeInBytes();
6725 isTC, MachinePointerInfo(
MI.getRawDest()),
6726 MachinePointerInfo(
MI.getRawSource()));
6727 updateDAGForMaybeTailCall(MC);
6730 case Intrinsic::memmove_element_unordered_atomic: {
6736 Type *LengthTy =
MI.getLength()->getType();
6737 unsigned ElemSz =
MI.getElementSizeInBytes();
6741 isTC, MachinePointerInfo(
MI.getRawDest()),
6742 MachinePointerInfo(
MI.getRawSource()));
6743 updateDAGForMaybeTailCall(MC);
6746 case Intrinsic::memset_element_unordered_atomic: {
6752 Type *LengthTy =
MI.getLength()->getType();
6753 unsigned ElemSz =
MI.getElementSizeInBytes();
6757 isTC, MachinePointerInfo(
MI.getRawDest()));
6758 updateDAGForMaybeTailCall(MC);
6761 case Intrinsic::call_preallocated_setup: {
6763 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6770 case Intrinsic::call_preallocated_arg: {
6772 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6786 case Intrinsic::eh_typeid_for: {
6789 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6790 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6795 case Intrinsic::eh_return_i32:
6796 case Intrinsic::eh_return_i64:
6797 DAG.getMachineFunction().setCallsEHReturn(
true);
6804 case Intrinsic::eh_unwind_init:
6805 DAG.getMachineFunction().setCallsUnwindInit(
true);
6807 case Intrinsic::eh_dwarf_cfa:
6812 case Intrinsic::eh_sjlj_callsite: {
6814 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6819 case Intrinsic::eh_sjlj_functioncontext: {
6821 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6824 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6828 case Intrinsic::eh_sjlj_setjmp: {
6833 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6835 DAG.setRoot(
Op.getValue(1));
6838 case Intrinsic::eh_sjlj_longjmp:
6842 case Intrinsic::eh_sjlj_setup_dispatch:
6846 case Intrinsic::masked_gather:
6847 visitMaskedGather(
I);
6849 case Intrinsic::masked_load:
6852 case Intrinsic::masked_scatter:
6853 visitMaskedScatter(
I);
6855 case Intrinsic::masked_store:
6856 visitMaskedStore(
I);
6858 case Intrinsic::masked_expandload:
6859 visitMaskedLoad(
I,
true );
6861 case Intrinsic::masked_compressstore:
6862 visitMaskedStore(
I,
true );
6864 case Intrinsic::powi:
6868 case Intrinsic::log:
6871 case Intrinsic::log2:
6875 case Intrinsic::log10:
6879 case Intrinsic::exp:
6882 case Intrinsic::exp2:
6886 case Intrinsic::pow:
6890 case Intrinsic::sqrt:
6891 case Intrinsic::fabs:
6892 case Intrinsic::sin:
6893 case Intrinsic::cos:
6894 case Intrinsic::tan:
6895 case Intrinsic::asin:
6896 case Intrinsic::acos:
6897 case Intrinsic::atan:
6898 case Intrinsic::sinh:
6899 case Intrinsic::cosh:
6900 case Intrinsic::tanh:
6901 case Intrinsic::exp10:
6902 case Intrinsic::floor:
6903 case Intrinsic::ceil:
6904 case Intrinsic::trunc:
6905 case Intrinsic::rint:
6906 case Intrinsic::nearbyint:
6907 case Intrinsic::round:
6908 case Intrinsic::roundeven:
6909 case Intrinsic::canonicalize: {
6912 switch (Intrinsic) {
6914 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6915 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6916 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6917 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6918 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6919 case Intrinsic::asin: Opcode =
ISD::FASIN;
break;
6920 case Intrinsic::acos: Opcode =
ISD::FACOS;
break;
6921 case Intrinsic::atan: Opcode =
ISD::FATAN;
break;
6922 case Intrinsic::sinh: Opcode =
ISD::FSINH;
break;
6923 case Intrinsic::cosh: Opcode =
ISD::FCOSH;
break;
6924 case Intrinsic::tanh: Opcode =
ISD::FTANH;
break;
6925 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6926 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6927 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6928 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6929 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6931 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6938 getValue(
I.getArgOperand(0)).getValueType(),
6942 case Intrinsic::atan2:
6944 getValue(
I.getArgOperand(0)).getValueType(),
6948 case Intrinsic::lround:
6949 case Intrinsic::llround:
6950 case Intrinsic::lrint:
6951 case Intrinsic::llrint: {
6954 switch (Intrinsic) {
6956 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6958 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6959 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6968 case Intrinsic::minnum:
6970 getValue(
I.getArgOperand(0)).getValueType(),
6974 case Intrinsic::maxnum:
6976 getValue(
I.getArgOperand(0)).getValueType(),
6980 case Intrinsic::minimum:
6982 getValue(
I.getArgOperand(0)).getValueType(),
6986 case Intrinsic::maximum:
6988 getValue(
I.getArgOperand(0)).getValueType(),
6992 case Intrinsic::minimumnum:
6994 getValue(
I.getArgOperand(0)).getValueType(),
6998 case Intrinsic::maximumnum:
7000 getValue(
I.getArgOperand(0)).getValueType(),
7004 case Intrinsic::copysign:
7006 getValue(
I.getArgOperand(0)).getValueType(),
7010 case Intrinsic::ldexp:
7012 getValue(
I.getArgOperand(0)).getValueType(),
7016 case Intrinsic::modf:
7017 case Intrinsic::sincos:
7018 case Intrinsic::sincospi:
7019 case Intrinsic::frexp: {
7021 switch (Intrinsic) {
7024 case Intrinsic::sincos:
7027 case Intrinsic::sincospi:
7030 case Intrinsic::modf:
7033 case Intrinsic::frexp:
7039 SDVTList VTs =
DAG.getVTList(ValueVTs);
7041 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7044 case Intrinsic::arithmetic_fence: {
7046 getValue(
I.getArgOperand(0)).getValueType(),
7050 case Intrinsic::fma:
7056#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7057 case Intrinsic::INTRINSIC:
7058#include "llvm/IR/ConstrainedOps.def"
7061#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7062#include "llvm/IR/VPIntrinsics.def"
7065 case Intrinsic::fptrunc_round: {
7069 std::optional<RoundingMode> RoundMode =
7077 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7082 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7087 case Intrinsic::fmuladd: {
7092 getValue(
I.getArgOperand(0)).getValueType(),
7099 getValue(
I.getArgOperand(0)).getValueType(),
7115 case Intrinsic::convert_to_fp16:
7119 DAG.getTargetConstant(0, sdl,
7122 case Intrinsic::convert_from_fp16:
7128 case Intrinsic::fptosi_sat: {
7135 case Intrinsic::fptoui_sat: {
7142 case Intrinsic::set_rounding:
7148 case Intrinsic::is_fpclass: {
7149 const DataLayout DLayout =
DAG.getDataLayout();
7151 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7154 MachineFunction &MF =
DAG.getMachineFunction();
7158 Flags.setNoFPExcept(
7159 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7175 case Intrinsic::get_fpenv: {
7176 const DataLayout DLayout =
DAG.getDataLayout();
7178 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7193 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7196 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7197 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7203 case Intrinsic::set_fpenv: {
7204 const DataLayout DLayout =
DAG.getDataLayout();
7207 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7220 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7222 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7225 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7230 case Intrinsic::reset_fpenv:
7233 case Intrinsic::get_fpmode:
7242 case Intrinsic::set_fpmode:
7247 case Intrinsic::reset_fpmode: {
7252 case Intrinsic::pcmarker: {
7257 case Intrinsic::readcyclecounter: {
7260 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7265 case Intrinsic::readsteadycounter: {
7268 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7273 case Intrinsic::bitreverse:
7275 getValue(
I.getArgOperand(0)).getValueType(),
7278 case Intrinsic::bswap:
7280 getValue(
I.getArgOperand(0)).getValueType(),
7283 case Intrinsic::cttz: {
7291 case Intrinsic::ctlz: {
7299 case Intrinsic::ctpop: {
7305 case Intrinsic::fshl:
7306 case Intrinsic::fshr: {
7307 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7311 EVT VT =
X.getValueType();
7322 case Intrinsic::sadd_sat: {
7328 case Intrinsic::uadd_sat: {
7334 case Intrinsic::ssub_sat: {
7340 case Intrinsic::usub_sat: {
7346 case Intrinsic::sshl_sat: {
7352 case Intrinsic::ushl_sat: {
7358 case Intrinsic::smul_fix:
7359 case Intrinsic::umul_fix:
7360 case Intrinsic::smul_fix_sat:
7361 case Intrinsic::umul_fix_sat: {
7369 case Intrinsic::sdiv_fix:
7370 case Intrinsic::udiv_fix:
7371 case Intrinsic::sdiv_fix_sat:
7372 case Intrinsic::udiv_fix_sat: {
7377 Op1, Op2, Op3,
DAG, TLI));
7380 case Intrinsic::smax: {
7386 case Intrinsic::smin: {
7392 case Intrinsic::umax: {
7398 case Intrinsic::umin: {
7404 case Intrinsic::abs: {
7410 case Intrinsic::scmp: {
7417 case Intrinsic::ucmp: {
7424 case Intrinsic::stacksave: {
7432 case Intrinsic::stackrestore:
7436 case Intrinsic::get_dynamic_area_offset: {
7445 case Intrinsic::stackguard: {
7446 MachineFunction &MF =
DAG.getMachineFunction();
7452 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7456 LLVMContext &Ctx = *
DAG.getContext();
7457 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7464 MachinePointerInfo(
Global, 0), Align,
7473 case Intrinsic::stackprotector: {
7475 MachineFunction &MF =
DAG.getMachineFunction();
7495 Chain, sdl, Src, FIN,
7502 case Intrinsic::objectsize:
7505 case Intrinsic::is_constant:
7508 case Intrinsic::annotation:
7509 case Intrinsic::ptr_annotation:
7510 case Intrinsic::launder_invariant_group:
7511 case Intrinsic::strip_invariant_group:
7516 case Intrinsic::type_test:
7517 case Intrinsic::public_type_test:
7521 case Intrinsic::assume:
7522 case Intrinsic::experimental_noalias_scope_decl:
7523 case Intrinsic::var_annotation:
7524 case Intrinsic::sideeffect:
7529 case Intrinsic::codeview_annotation: {
7531 MachineFunction &MF =
DAG.getMachineFunction();
7540 case Intrinsic::init_trampoline: {
7548 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7556 case Intrinsic::adjust_trampoline:
7561 case Intrinsic::gcroot: {
7562 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7563 "only valid in functions with gc specified, enforced by Verifier");
7565 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7572 case Intrinsic::gcread:
7573 case Intrinsic::gcwrite:
7575 case Intrinsic::get_rounding:
7581 case Intrinsic::expect:
7582 case Intrinsic::expect_with_probability:
7588 case Intrinsic::ubsantrap:
7589 case Intrinsic::debugtrap:
7590 case Intrinsic::trap: {
7591 StringRef TrapFuncName =
7592 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7593 if (TrapFuncName.
empty()) {
7594 switch (Intrinsic) {
7595 case Intrinsic::trap:
7598 case Intrinsic::debugtrap:
7601 case Intrinsic::ubsantrap:
7604 DAG.getTargetConstant(
7610 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7611 I.hasFnAttr(Attribute::NoMerge));
7615 if (Intrinsic == Intrinsic::ubsantrap) {
7616 Value *Arg =
I.getArgOperand(0);
7620 TargetLowering::CallLoweringInfo CLI(
DAG);
7621 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7623 DAG.getExternalSymbol(TrapFuncName.
data(),
7626 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7632 case Intrinsic::allow_runtime_check:
7633 case Intrinsic::allow_ubsan_check:
7637 case Intrinsic::uadd_with_overflow:
7638 case Intrinsic::sadd_with_overflow:
7639 case Intrinsic::usub_with_overflow:
7640 case Intrinsic::ssub_with_overflow:
7641 case Intrinsic::umul_with_overflow:
7642 case Intrinsic::smul_with_overflow: {
7644 switch (Intrinsic) {
7646 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7647 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7648 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7649 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7650 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7651 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7657 EVT OverflowVT = MVT::i1;
7662 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7666 case Intrinsic::prefetch: {
7681 std::nullopt, Flags);
7687 DAG.setRoot(Result);
7690 case Intrinsic::lifetime_start:
7691 case Intrinsic::lifetime_end: {
7692 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7698 if (!LifetimeObject)
7703 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7704 if (SI ==
FuncInfo.StaticAllocaMap.end())
7708 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7712 case Intrinsic::pseudoprobe: {
7720 case Intrinsic::invariant_start:
7725 case Intrinsic::invariant_end:
7728 case Intrinsic::clear_cache: {
7733 {InputChain, StartVal, EndVal});
7738 case Intrinsic::donothing:
7739 case Intrinsic::seh_try_begin:
7740 case Intrinsic::seh_scope_begin:
7741 case Intrinsic::seh_try_end:
7742 case Intrinsic::seh_scope_end:
7745 case Intrinsic::experimental_stackmap:
7748 case Intrinsic::experimental_patchpoint_void:
7749 case Intrinsic::experimental_patchpoint:
7752 case Intrinsic::experimental_gc_statepoint:
7755 case Intrinsic::experimental_gc_result:
7758 case Intrinsic::experimental_gc_relocate:
7761 case Intrinsic::instrprof_cover:
7763 case Intrinsic::instrprof_increment:
7765 case Intrinsic::instrprof_timestamp:
7767 case Intrinsic::instrprof_value_profile:
7769 case Intrinsic::instrprof_mcdc_parameters:
7771 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7773 case Intrinsic::localescape: {
7774 MachineFunction &MF =
DAG.getMachineFunction();
7775 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7779 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7785 "can only escape static allocas");
7790 TII->get(TargetOpcode::LOCAL_ESCAPE))
7798 case Intrinsic::localrecover: {
7800 MachineFunction &MF =
DAG.getMachineFunction();
7806 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7810 Value *
FP =
I.getArgOperand(1);
7816 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7821 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7827 case Intrinsic::fake_use: {
7828 Value *
V =
I.getArgOperand(0);
7833 auto FakeUseValue = [&]() ->
SDValue {
7847 if (!FakeUseValue || FakeUseValue.isUndef())
7850 Ops[1] = FakeUseValue;
7859 case Intrinsic::reloc_none: {
7864 DAG.getTargetExternalSymbol(
7870 case Intrinsic::eh_exceptionpointer:
7871 case Intrinsic::eh_exceptioncode: {
7877 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7878 if (Intrinsic == Intrinsic::eh_exceptioncode)
7879 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7883 case Intrinsic::xray_customevent: {
7886 const auto &Triple =
DAG.getTarget().getTargetTriple();
7895 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7897 Ops.push_back(LogEntryVal);
7898 Ops.push_back(StrSizeVal);
7899 Ops.push_back(Chain);
7905 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7908 DAG.setRoot(patchableNode);
7912 case Intrinsic::xray_typedevent: {
7915 const auto &Triple =
DAG.getTarget().getTargetTriple();
7927 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7929 Ops.push_back(LogTypeId);
7930 Ops.push_back(LogEntryVal);
7931 Ops.push_back(StrSizeVal);
7932 Ops.push_back(Chain);
7938 MachineSDNode *MN =
DAG.getMachineNode(
7939 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7941 DAG.setRoot(patchableNode);
7945 case Intrinsic::experimental_deoptimize:
7948 case Intrinsic::stepvector:
7951 case Intrinsic::vector_reduce_fadd:
7952 case Intrinsic::vector_reduce_fmul:
7953 case Intrinsic::vector_reduce_add:
7954 case Intrinsic::vector_reduce_mul:
7955 case Intrinsic::vector_reduce_and:
7956 case Intrinsic::vector_reduce_or:
7957 case Intrinsic::vector_reduce_xor:
7958 case Intrinsic::vector_reduce_smax:
7959 case Intrinsic::vector_reduce_smin:
7960 case Intrinsic::vector_reduce_umax:
7961 case Intrinsic::vector_reduce_umin:
7962 case Intrinsic::vector_reduce_fmax:
7963 case Intrinsic::vector_reduce_fmin:
7964 case Intrinsic::vector_reduce_fmaximum:
7965 case Intrinsic::vector_reduce_fminimum:
7966 visitVectorReduce(
I, Intrinsic);
7969 case Intrinsic::icall_branch_funnel: {
7975 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7978 "llvm.icall.branch.funnel operand must be a GlobalValue");
7979 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7981 struct BranchFunnelTarget {
7987 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7990 if (ElemBase !=
Base)
7992 "to the same GlobalValue");
7998 "llvm.icall.branch.funnel operand must be a GlobalValue");
8004 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
8005 return T1.Offset < T2.Offset;
8008 for (
auto &
T : Targets) {
8009 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
8010 Ops.push_back(
T.Target);
8013 Ops.push_back(
DAG.getRoot());
8014 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
8023 case Intrinsic::wasm_landingpad_index:
8029 case Intrinsic::aarch64_settag:
8030 case Intrinsic::aarch64_settag_zero: {
8031 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
8032 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
8035 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8041 case Intrinsic::amdgcn_cs_chain: {
8046 Type *RetTy =
I.getType();
8056 for (
unsigned Idx : {2, 3, 1}) {
8057 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8059 Arg.setAttributes(&
I, Idx);
8060 Args.push_back(Arg);
8063 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8064 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8065 Args[2].IsInReg =
true;
8068 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8069 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8071 Arg.setAttributes(&
I, Idx);
8072 Args.push_back(Arg);
8075 TargetLowering::CallLoweringInfo CLI(
DAG);
8078 .setCallee(CC, RetTy, Callee, std::move(Args))
8081 .setConvergent(
I.isConvergent());
8083 std::pair<SDValue, SDValue>
Result =
8087 "Should've lowered as tail call");
8092 case Intrinsic::amdgcn_call_whole_wave: {
8094 bool isTailCall =
I.isTailCall();
8097 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8098 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8099 I.getArgOperand(Idx)->getType());
8100 Arg.setAttributes(&
I, Idx);
8107 Args.push_back(Arg);
8112 auto *Token = Bundle->Inputs[0].get();
8113 ConvControlToken =
getValue(Token);
8116 TargetLowering::CallLoweringInfo CLI(
DAG);
8120 getValue(
I.getArgOperand(0)), std::move(Args))
8124 .setConvergent(
I.isConvergent())
8125 .setConvergenceControlToken(ConvControlToken);
8128 std::pair<SDValue, SDValue>
Result =
8131 if (
Result.first.getNode())
8135 case Intrinsic::ptrmask: {
8151 auto HighOnes =
DAG.getNode(
8152 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8153 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8156 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8157 }
else if (
Mask.getValueType() != PtrVT)
8158 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8164 case Intrinsic::threadlocal_address: {
8168 case Intrinsic::get_active_lane_mask: {
8172 EVT ElementVT =
Index.getValueType();
8183 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8184 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8185 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8188 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8193 case Intrinsic::experimental_get_vector_length: {
8195 "Expected positive VF");
8200 EVT CountVT =
Count.getValueType();
8203 visitTargetIntrinsic(
I, Intrinsic);
8212 if (CountVT.
bitsLT(VT)) {
8217 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8227 case Intrinsic::vector_partial_reduce_add: {
8235 case Intrinsic::vector_partial_reduce_fadd: {
8243 case Intrinsic::experimental_cttz_elts: {
8246 EVT OpVT =
Op.getValueType();
8249 visitTargetIntrinsic(
I, Intrinsic);
8265 ConstantRange VScaleRange(1,
true);
8294 case Intrinsic::vector_insert: {
8302 if (
Index.getValueType() != VectorIdxTy)
8303 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8310 case Intrinsic::vector_extract: {
8318 if (
Index.getValueType() != VectorIdxTy)
8319 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8325 case Intrinsic::experimental_vector_match: {
8331 EVT ResVT =
Mask.getValueType();
8337 visitTargetIntrinsic(
I, Intrinsic);
8341 SDValue Ret =
DAG.getConstant(0, sdl, ResVT);
8343 for (
unsigned i = 0; i < SearchSize; ++i) {
8346 DAG.getVectorIdxConstant(i, sdl));
8349 Ret =
DAG.getNode(
ISD::OR, sdl, ResVT, Ret, Cmp);
8355 case Intrinsic::vector_reverse:
8356 visitVectorReverse(
I);
8358 case Intrinsic::vector_splice:
8359 visitVectorSplice(
I);
8361 case Intrinsic::callbr_landingpad:
8362 visitCallBrLandingPad(
I);
8364 case Intrinsic::vector_interleave2:
8365 visitVectorInterleave(
I, 2);
8367 case Intrinsic::vector_interleave3:
8368 visitVectorInterleave(
I, 3);
8370 case Intrinsic::vector_interleave4:
8371 visitVectorInterleave(
I, 4);
8373 case Intrinsic::vector_interleave5:
8374 visitVectorInterleave(
I, 5);
8376 case Intrinsic::vector_interleave6:
8377 visitVectorInterleave(
I, 6);
8379 case Intrinsic::vector_interleave7:
8380 visitVectorInterleave(
I, 7);
8382 case Intrinsic::vector_interleave8:
8383 visitVectorInterleave(
I, 8);
8385 case Intrinsic::vector_deinterleave2:
8386 visitVectorDeinterleave(
I, 2);
8388 case Intrinsic::vector_deinterleave3:
8389 visitVectorDeinterleave(
I, 3);
8391 case Intrinsic::vector_deinterleave4:
8392 visitVectorDeinterleave(
I, 4);
8394 case Intrinsic::vector_deinterleave5:
8395 visitVectorDeinterleave(
I, 5);
8397 case Intrinsic::vector_deinterleave6:
8398 visitVectorDeinterleave(
I, 6);
8400 case Intrinsic::vector_deinterleave7:
8401 visitVectorDeinterleave(
I, 7);
8403 case Intrinsic::vector_deinterleave8:
8404 visitVectorDeinterleave(
I, 8);
8406 case Intrinsic::experimental_vector_compress:
8408 getValue(
I.getArgOperand(0)).getValueType(),
8413 case Intrinsic::experimental_convergence_anchor:
8414 case Intrinsic::experimental_convergence_entry:
8415 case Intrinsic::experimental_convergence_loop:
8416 visitConvergenceControl(
I, Intrinsic);
8418 case Intrinsic::experimental_vector_histogram_add: {
8419 visitVectorHistogram(
I, Intrinsic);
8422 case Intrinsic::experimental_vector_extract_last_active: {
8423 visitVectorExtractLastActive(
I, Intrinsic);
8426 case Intrinsic::loop_dependence_war_mask:
8431 DAG.getConstant(0, sdl, MVT::i64)));
8433 case Intrinsic::loop_dependence_raw_mask:
8438 DAG.getConstant(0, sdl, MVT::i64)));
8443void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8459 PendingConstrainedFP.push_back(OutChain);
8462 PendingConstrainedFPStrict.push_back(OutChain);
8467void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8481 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8483 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8487 Flags.setNoFPExcept(
true);
8490 Flags.copyFMF(*FPOp);
8495#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8496 case Intrinsic::INTRINSIC: \
8497 Opcode = ISD::STRICT_##DAGN; \
8499#include "llvm/IR/ConstrainedOps.def"
8500 case Intrinsic::experimental_constrained_fmuladd: {
8507 pushFPOpOutChain(
Mul, EB);
8530 if (TM.Options.NoNaNsFPMath)
8538 pushFPOpOutChain(Result, EB);
8545 std::optional<unsigned> ResOPC;
8547 case Intrinsic::vp_ctlz: {
8549 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8552 case Intrinsic::vp_cttz: {
8554 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8557 case Intrinsic::vp_cttz_elts: {
8559 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8562#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8563 case Intrinsic::VPID: \
8564 ResOPC = ISD::VPSD; \
8566#include "llvm/IR/VPIntrinsics.def"
8571 "Inconsistency: no SDNode available for this VPIntrinsic!");
8573 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8574 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8576 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8577 : ISD::VP_REDUCE_FMUL;
8583void SelectionDAGBuilder::visitVPLoad(
8595 Alignment =
DAG.getEVTAlign(VT);
8598 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8599 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8602 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8603 MachinePointerInfo(PtrOperand), MMOFlags,
8605 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8612void SelectionDAGBuilder::visitVPLoadFF(
8615 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8625 Alignment =
DAG.getEVTAlign(VT);
8628 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8629 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8632 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8637 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8640void SelectionDAGBuilder::visitVPGather(
8644 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8656 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8658 *Alignment, AAInfo, Ranges);
8668 EVT IdxVT =
Index.getValueType();
8674 LD =
DAG.getGatherVP(
8675 DAG.getVTList(VT, MVT::Other), VT,
DL,
8676 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8682void SelectionDAGBuilder::visitVPStore(
8686 EVT VT = OpValues[0].getValueType();
8691 Alignment =
DAG.getEVTAlign(VT);
8694 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8697 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8698 MachinePointerInfo(PtrOperand), MMOFlags,
8707void SelectionDAGBuilder::visitVPScatter(
8710 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8712 EVT VT = OpValues[0].getValueType();
8722 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8724 *Alignment, AAInfo);
8734 EVT IdxVT =
Index.getValueType();
8740 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8741 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8742 OpValues[2], OpValues[3]},
8748void SelectionDAGBuilder::visitVPStridedLoad(
8760 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8762 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8765 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8767 *Alignment, AAInfo, Ranges);
8769 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8770 OpValues[2], OpValues[3], MMO,
8778void SelectionDAGBuilder::visitVPStridedStore(
8782 EVT VT = OpValues[0].getValueType();
8788 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8791 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8793 *Alignment, AAInfo);
8797 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8805void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8806 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8817 if (TM.Options.NoNaNsFPMath)
8830 "Unexpected target EVL type");
8833 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8836 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8839void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8847 return visitVPCmp(*CmpI);
8850 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8852 SDVTList VTs =
DAG.getVTList(ValueVTs);
8858 "Unexpected target EVL type");
8862 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8864 if (
I == EVLParamPos)
8871 SDNodeFlags SDFlags;
8879 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8881 case ISD::VP_LOAD_FF:
8882 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8884 case ISD::VP_GATHER:
8885 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8887 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8888 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8891 visitVPStore(VPIntrin, OpValues);
8893 case ISD::VP_SCATTER:
8894 visitVPScatter(VPIntrin, OpValues);
8896 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8897 visitVPStridedStore(VPIntrin, OpValues);
8899 case ISD::VP_FMULADD: {
8900 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8901 SDNodeFlags SDFlags;
8906 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8909 ISD::VP_FMUL,
DL, VTs,
8910 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8912 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8913 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8918 case ISD::VP_IS_FPCLASS: {
8919 const DataLayout DLayout =
DAG.getDataLayout();
8921 auto Constant = OpValues[1]->getAsZExtVal();
8924 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8928 case ISD::VP_INTTOPTR: {
8939 case ISD::VP_PTRTOINT: {
8941 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8954 case ISD::VP_CTLZ_ZERO_UNDEF:
8956 case ISD::VP_CTTZ_ZERO_UNDEF:
8957 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8958 case ISD::VP_CTTZ_ELTS: {
8960 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8970 MachineFunction &MF =
DAG.getMachineFunction();
8978 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8979 if (CallSiteIndex) {
8993 assert(BeginLabel &&
"BeginLabel should've been set");
8995 MachineFunction &MF =
DAG.getMachineFunction();
9007 assert(
II &&
"II should've been set");
9018std::pair<SDValue, SDValue>
9032 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
9035 "Non-null chain expected with non-tail call!");
9036 assert((Result.second.getNode() || !Result.first.getNode()) &&
9037 "Null value expected with tail call!");
9039 if (!Result.second.getNode()) {
9046 PendingExports.clear();
9048 DAG.setRoot(Result.second);
9066 if (!isMustTailCall &&
9067 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9073 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9074 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9083 bool isTailCall,
bool isMustTailCall,
9086 auto &
DL =
DAG.getDataLayout();
9093 const Value *SwiftErrorVal =
nullptr;
9100 const Value *V = *
I;
9103 if (V->getType()->isEmptyTy())
9108 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9120 Args.push_back(Entry);
9131 Value *V = Bundle->Inputs[0];
9133 Entry.IsCFGuardTarget =
true;
9134 Args.push_back(Entry);
9147 "Target doesn't support calls with kcfi operand bundles.");
9155 auto *Token = Bundle->Inputs[0].get();
9156 ConvControlToken =
getValue(Token);
9167 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9180 "This target doesn't support calls with ptrauth operand bundles.");
9184 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9186 if (Result.first.getNode()) {
9201 DAG.setRoot(CopyNode);
9217 LoadTy, Builder.DAG.getDataLayout()))
9218 return Builder.getValue(LoadCst);
9224 bool ConstantMemory =
false;
9227 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9228 Root = Builder.DAG.getEntryNode();
9229 ConstantMemory =
true;
9232 Root = Builder.DAG.getRoot();
9237 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9240 if (!ConstantMemory)
9241 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9247void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9250 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9261bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9262 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9263 const Value *
Size =
I.getArgOperand(2);
9266 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9272 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9276 if (Res.first.getNode()) {
9277 processIntegerCallValue(
I, Res.first,
true);
9291 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9292 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9314 switch (NumBitsToCompare) {
9326 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9339 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9340 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9344 processIntegerCallValue(
I, Cmp,
false);
9353bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9354 const Value *Src =
I.getArgOperand(0);
9355 const Value *
Char =
I.getArgOperand(1);
9356 const Value *
Length =
I.getArgOperand(2);
9358 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9359 std::pair<SDValue, SDValue> Res =
9362 MachinePointerInfo(Src));
9363 if (Res.first.getNode()) {
9377bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9382 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9383 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9385 Align Alignment = std::min(DstAlign, SrcAlign);
9394 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9395 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9396 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9398 "** memcpy should not be lowered as TailCall in mempcpy context **");
9402 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9415bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9416 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9418 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9419 std::pair<SDValue, SDValue> Res =
9422 MachinePointerInfo(Arg0),
9423 MachinePointerInfo(Arg1), isStpcpy);
9424 if (Res.first.getNode()) {
9426 DAG.setRoot(Res.second);
9438bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9439 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9441 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9442 std::pair<SDValue, SDValue> Res =
9445 MachinePointerInfo(Arg0),
9446 MachinePointerInfo(Arg1));
9447 if (Res.first.getNode()) {
9448 processIntegerCallValue(
I, Res.first,
true);
9461bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9462 const Value *Arg0 =
I.getArgOperand(0);
9464 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9467 if (Res.first.getNode()) {
9468 processIntegerCallValue(
I, Res.first,
false);
9481bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9482 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9484 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9485 std::pair<SDValue, SDValue> Res =
9488 MachinePointerInfo(Arg0));
9489 if (Res.first.getNode()) {
9490 processIntegerCallValue(
I, Res.first,
false);
9503bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9508 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9525bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9530 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9543void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9545 if (
I.isInlineAsm()) {
9552 if (Function *
F =
I.getCalledFunction()) {
9553 if (
F->isDeclaration()) {
9555 if (
unsigned IID =
F->getIntrinsicID()) {
9556 visitIntrinsicCall(
I, IID);
9567 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9568 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9572 if (visitMemCmpBCmpCall(
I))
9575 case LibFunc_copysign:
9576 case LibFunc_copysignf:
9577 case LibFunc_copysignl:
9580 if (
I.onlyReadsMemory()) {
9632 case LibFunc_atan2f:
9633 case LibFunc_atan2l:
9658 case LibFunc_sqrt_finite:
9659 case LibFunc_sqrtf_finite:
9660 case LibFunc_sqrtl_finite:
9677 case LibFunc_exp10f:
9678 case LibFunc_exp10l:
9683 case LibFunc_ldexpf:
9684 case LibFunc_ldexpl:
9688 case LibFunc_memcmp:
9689 if (visitMemCmpBCmpCall(
I))
9692 case LibFunc_mempcpy:
9693 if (visitMemPCpyCall(
I))
9696 case LibFunc_memchr:
9697 if (visitMemChrCall(
I))
9700 case LibFunc_strcpy:
9701 if (visitStrCpyCall(
I,
false))
9704 case LibFunc_stpcpy:
9705 if (visitStrCpyCall(
I,
true))
9708 case LibFunc_strcmp:
9709 if (visitStrCmpCall(
I))
9712 case LibFunc_strlen:
9713 if (visitStrLenCall(
I))
9716 case LibFunc_strnlen:
9717 if (visitStrNLenCall(
I))
9741 if (
I.hasDeoptState())
9758 const Value *Discriminator = PAB->Inputs[1];
9760 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9761 assert(Discriminator->getType()->isIntegerTy(64) &&
9762 "Invalid ptrauth discriminator");
9767 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9768 DAG.getDataLayout()))
9808 for (
const auto &Code : Codes)
9823 SDISelAsmOperandInfo &MatchingOpInfo,
9825 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9831 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9833 OpInfo.ConstraintVT);
9834 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9836 MatchingOpInfo.ConstraintVT);
9837 const bool OutOpIsIntOrFP =
9838 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9839 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9840 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9841 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9844 " with a matching output constraint of"
9845 " incompatible type!");
9847 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9854 SDISelAsmOperandInfo &OpInfo,
9867 const Value *OpVal = OpInfo.CallOperandVal;
9885 DL.getPrefTypeAlign(Ty),
false,
9888 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9891 OpInfo.CallOperand = StackSlot;
9904static std::optional<unsigned>
9906 SDISelAsmOperandInfo &OpInfo,
9907 SDISelAsmOperandInfo &RefOpInfo) {
9918 return std::nullopt;
9922 unsigned AssignedReg;
9925 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9928 return std::nullopt;
9933 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9935 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9944 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9949 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9954 OpInfo.CallOperand =
9956 OpInfo.ConstraintVT = RegVT;
9960 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9963 OpInfo.CallOperand =
9965 OpInfo.ConstraintVT = VT;
9972 if (OpInfo.isMatchingInputConstraint())
9973 return std::nullopt;
9975 EVT ValueVT = OpInfo.ConstraintVT;
9976 if (OpInfo.ConstraintVT == MVT::Other)
9980 unsigned NumRegs = 1;
9981 if (OpInfo.ConstraintVT != MVT::Other)
9996 I = std::find(
I, RC->
end(), AssignedReg);
9997 if (
I == RC->
end()) {
10000 return {AssignedReg};
10004 for (; NumRegs; --NumRegs, ++
I) {
10005 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10010 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10011 return std::nullopt;
10016 const std::vector<SDValue> &AsmNodeOperands) {
10019 for (; OperandNo; --OperandNo) {
10021 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10024 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10025 "Skipped past definitions?");
10026 CurOp +=
F.getNumOperandRegisters() + 1;
10034 unsigned Flags = 0;
10037 explicit ExtraFlags(
const CallBase &
Call) {
10039 if (
IA->hasSideEffects())
10041 if (
IA->isAlignStack())
10048 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10064 unsigned get()
const {
return Flags; }
10087void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10094 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10096 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10100 bool HasSideEffect =
IA->hasSideEffects();
10101 ExtraFlags ExtraInfo(
Call);
10103 for (
auto &
T : TargetConstraints) {
10104 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10105 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10107 if (OpInfo.CallOperandVal)
10108 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10110 if (!HasSideEffect)
10111 HasSideEffect = OpInfo.hasMemory(TLI);
10123 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10124 "' expects an integer constant "
10127 ExtraInfo.update(
T);
10135 if (EmitEHLabels) {
10136 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10140 if (IsCallBr || EmitEHLabels) {
10148 if (EmitEHLabels) {
10149 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10154 IA->collectAsmStrs(AsmStrs);
10157 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10165 if (OpInfo.hasMatchingInput()) {
10166 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10197 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10200 OpInfo.isIndirect =
false;
10207 !OpInfo.isIndirect) {
10208 assert((OpInfo.isMultipleAlternative ||
10210 "Can only indirectify direct input operands!");
10216 OpInfo.CallOperandVal =
nullptr;
10219 OpInfo.isIndirect =
true;
10225 std::vector<SDValue> AsmNodeOperands;
10226 AsmNodeOperands.push_back(
SDValue());
10227 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10234 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10238 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10243 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10245 SDISelAsmOperandInfo &RefOpInfo =
10246 OpInfo.isMatchingInputConstraint()
10247 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10249 const auto RegError =
10252 const MachineFunction &MF =
DAG.getMachineFunction();
10254 const char *
RegName =
TRI.getName(*RegError);
10255 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10256 "' allocated for constraint '" +
10257 Twine(OpInfo.ConstraintCode) +
10258 "' does not match required type");
10262 auto DetectWriteToReservedRegister = [&]() {
10263 const MachineFunction &MF =
DAG.getMachineFunction();
10268 emitInlineAsmError(
Call,
"write to reserved register '" +
10277 !OpInfo.isMatchingInputConstraint())) &&
10278 "Only address as input operand is allowed.");
10280 switch (OpInfo.Type) {
10286 "Failed to convert memory constraint code to constraint id.");
10290 OpFlags.setMemConstraint(ConstraintID);
10291 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10293 AsmNodeOperands.push_back(OpInfo.CallOperand);
10298 if (OpInfo.AssignedRegs.
Regs.empty()) {
10299 emitInlineAsmError(
10300 Call,
"couldn't allocate output register for constraint '" +
10301 Twine(OpInfo.ConstraintCode) +
"'");
10305 if (DetectWriteToReservedRegister())
10319 SDValue InOperandVal = OpInfo.CallOperand;
10321 if (OpInfo.isMatchingInputConstraint()) {
10326 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10327 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10328 if (OpInfo.isIndirect) {
10330 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10331 "don't know how to handle tied "
10332 "indirect register inputs");
10337 MachineFunction &MF =
DAG.getMachineFunction();
10342 MVT RegVT =
R->getSimpleValueType(0);
10343 const TargetRegisterClass *RC =
10346 :
TRI.getMinimalPhysRegClass(TiedReg);
10347 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10350 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10354 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10356 OpInfo.getMatchedOperand(), dl,
DAG,
10361 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10362 assert(
Flag.getNumOperandRegisters() == 1 &&
10363 "Unexpected number of operands");
10366 Flag.clearMemConstraint();
10367 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10368 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10370 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10381 std::vector<SDValue>
Ops;
10387 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10388 Twine(OpInfo.ConstraintCode) +
"'");
10392 emitInlineAsmError(
Call,
10393 "invalid operand for inline asm constraint '" +
10394 Twine(OpInfo.ConstraintCode) +
"'");
10400 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10407 assert((OpInfo.isIndirect ||
10409 "Operand must be indirect to be a mem!");
10412 "Memory operands expect pointer values");
10417 "Failed to convert memory constraint code to constraint id.");
10421 ResOpType.setMemConstraint(ConstraintID);
10422 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10425 AsmNodeOperands.push_back(InOperandVal);
10433 "Failed to convert memory constraint code to constraint id.");
10437 SDValue AsmOp = InOperandVal;
10441 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10447 ResOpType.setMemConstraint(ConstraintID);
10449 AsmNodeOperands.push_back(
10452 AsmNodeOperands.push_back(AsmOp);
10458 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10459 Twine(OpInfo.ConstraintCode) +
"'");
10464 if (OpInfo.isIndirect) {
10465 emitInlineAsmError(
10466 Call,
"Don't know how to handle indirect register inputs yet "
10467 "for constraint '" +
10468 Twine(OpInfo.ConstraintCode) +
"'");
10473 if (OpInfo.AssignedRegs.
Regs.empty()) {
10474 emitInlineAsmError(
Call,
10475 "couldn't allocate input reg for constraint '" +
10476 Twine(OpInfo.ConstraintCode) +
"'");
10480 if (DetectWriteToReservedRegister())
10489 0, dl,
DAG, AsmNodeOperands);
10495 if (!OpInfo.AssignedRegs.
Regs.empty())
10505 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10509 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10521 ResultTypes = StructResult->elements();
10522 else if (!CallResultType->
isVoidTy())
10523 ResultTypes =
ArrayRef(CallResultType);
10525 auto CurResultType = ResultTypes.
begin();
10526 auto handleRegAssign = [&](
SDValue V) {
10527 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10528 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10529 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10541 if (ResultVT !=
V.getValueType() &&
10544 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10545 V.getValueType().isInteger()) {
10551 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10557 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10561 if (OpInfo.AssignedRegs.
Regs.empty())
10564 switch (OpInfo.ConstraintType) {
10568 Chain, &Glue, &
Call);
10580 assert(
false &&
"Unexpected unknown constraint");
10584 if (OpInfo.isIndirect) {
10585 const Value *Ptr = OpInfo.CallOperandVal;
10586 assert(Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10588 MachinePointerInfo(Ptr));
10595 handleRegAssign(V);
10597 handleRegAssign(Val);
10603 if (!ResultValues.
empty()) {
10604 assert(CurResultType == ResultTypes.
end() &&
10605 "Mismatch in number of ResultTypes");
10607 "Mismatch in number of output operands in asm result");
10610 DAG.getVTList(ResultVTs), ResultValues);
10615 if (!OutChains.
empty())
10618 if (EmitEHLabels) {
10623 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10625 DAG.setRoot(Chain);
10628void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10629 const Twine &Message) {
10630 LLVMContext &Ctx = *
DAG.getContext();
10634 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10638 if (ValueVTs.
empty())
10642 for (
const EVT &VT : ValueVTs)
10643 Ops.push_back(
DAG.getUNDEF(VT));
10648void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10652 DAG.getSrcValue(
I.getArgOperand(0))));
10655void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10656 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10657 const DataLayout &
DL =
DAG.getDataLayout();
10661 DL.getABITypeAlign(
I.getType()).value());
10662 DAG.setRoot(
V.getValue(1));
10664 if (
I.getType()->isPointerTy())
10665 V =
DAG.getPtrExtOrTrunc(
10670void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10674 DAG.getSrcValue(
I.getArgOperand(0))));
10677void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10682 DAG.getSrcValue(
I.getArgOperand(0)),
10683 DAG.getSrcValue(
I.getArgOperand(1))));
10689 std::optional<ConstantRange> CR =
getRange(
I);
10691 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10694 APInt Lo = CR->getUnsignedMin();
10695 if (!
Lo.isMinValue())
10698 APInt Hi = CR->getUnsignedMax();
10699 unsigned Bits = std::max(
Hi.getActiveBits(),
10707 DAG.getValueType(SmallVT));
10708 unsigned NumVals =
Op.getNode()->getNumValues();
10714 Ops.push_back(ZExt);
10715 for (
unsigned I = 1;
I != NumVals; ++
I)
10716 Ops.push_back(
Op.getValue(
I));
10718 return DAG.getMergeValues(
Ops,
SL);
10728 SDValue TestConst =
DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32);
10736 for (
unsigned I = 0, E =
Ops.size();
I != E; ++
I) {
10739 MergeOp, TestConst);
10742 return DAG.getMergeValues(
Ops,
SL);
10753 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10756 Args.reserve(NumArgs);
10760 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10761 ArgI != ArgE; ++ArgI) {
10762 const Value *V =
Call->getOperand(ArgI);
10764 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10767 Entry.setAttributes(
Call, ArgI);
10768 Args.push_back(Entry);
10773 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10802 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10811 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10817void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10843 Ops.push_back(Chain);
10844 Ops.push_back(InGlue);
10851 assert(
ID.getValueType() == MVT::i64);
10853 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10854 Ops.push_back(IDConst);
10860 Ops.push_back(ShadConst);
10866 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10870 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10875 DAG.setRoot(Chain);
10878 FuncInfo.MF->getFrameInfo().setHasStackMap();
10882void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10899 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10902 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10903 SDLoc(SymbolicCallee),
10904 SymbolicCallee->getValueType(0));
10914 "Not enough arguments provided to the patchpoint intrinsic");
10917 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10921 TargetLowering::CallLoweringInfo CLI(
DAG);
10926 SDNode *CallEnd =
Result.second.getNode();
10935 "Expected a callseq node.");
10937 bool HasGlue =
Call->getGluedNode();
10962 Ops.push_back(Callee);
10968 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10969 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10972 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10977 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10988 if (IsAnyRegCC && HasDef) {
10990 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10993 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10998 NodeTys =
DAG.getVTList(ValueVTs);
11000 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11017 if (IsAnyRegCC && HasDef) {
11020 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11026 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11029void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11031 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11034 if (
I.arg_size() > 1)
11039 SDNodeFlags SDFlags;
11043 switch (Intrinsic) {
11044 case Intrinsic::vector_reduce_fadd:
11052 case Intrinsic::vector_reduce_fmul:
11060 case Intrinsic::vector_reduce_add:
11063 case Intrinsic::vector_reduce_mul:
11066 case Intrinsic::vector_reduce_and:
11069 case Intrinsic::vector_reduce_or:
11072 case Intrinsic::vector_reduce_xor:
11075 case Intrinsic::vector_reduce_smax:
11078 case Intrinsic::vector_reduce_smin:
11081 case Intrinsic::vector_reduce_umax:
11084 case Intrinsic::vector_reduce_umin:
11087 case Intrinsic::vector_reduce_fmax:
11090 case Intrinsic::vector_reduce_fmin:
11093 case Intrinsic::vector_reduce_fmaximum:
11096 case Intrinsic::vector_reduce_fminimum:
11110 Attrs.push_back(Attribute::SExt);
11112 Attrs.push_back(Attribute::ZExt);
11114 Attrs.push_back(Attribute::InReg);
11116 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11124std::pair<SDValue, SDValue>
11138 "Only supported for non-aggregate returns");
11141 for (
Type *Ty : RetOrigTys)
11150 RetOrigTys.
swap(OldRetOrigTys);
11151 RetVTs.
swap(OldRetVTs);
11152 Offsets.swap(OldOffsets);
11154 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11155 EVT RetVT = OldRetVTs[i];
11159 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11160 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11161 RetVTs.
append(NumRegs, RegisterVT);
11162 for (
unsigned j = 0; j != NumRegs; ++j)
11175 int DemoteStackIdx = -100;
11188 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11189 Entry.IsSRet =
true;
11190 Entry.Alignment = Alignment;
11202 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11204 if (NeedsRegBlock) {
11205 Flags.setInConsecutiveRegs();
11206 if (
I == RetVTs.
size() - 1)
11207 Flags.setInConsecutiveRegsLast();
11209 EVT VT = RetVTs[
I];
11213 for (
unsigned i = 0; i != NumRegs; ++i) {
11227 CLI.
Ins.push_back(Ret);
11236 if (Arg.IsSwiftError) {
11242 CLI.
Ins.push_back(Ret);
11250 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11254 Type *FinalType = Args[i].Ty;
11255 if (Args[i].IsByVal)
11256 FinalType = Args[i].IndirectType;
11259 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11262 Type *ArgTy = OrigArgTy;
11263 if (Args[i].Ty != Args[i].OrigTy) {
11264 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11265 ArgTy = Args[i].Ty;
11270 Args[i].Node.getResNo() +
Value);
11277 Flags.setOrigAlign(OriginalAlignment);
11282 Flags.setPointer();
11285 if (Args[i].IsZExt)
11287 if (Args[i].IsSExt)
11289 if (Args[i].IsNoExt)
11291 if (Args[i].IsInReg) {
11298 Flags.setHvaStart();
11304 if (Args[i].IsSRet)
11306 if (Args[i].IsSwiftSelf)
11307 Flags.setSwiftSelf();
11308 if (Args[i].IsSwiftAsync)
11309 Flags.setSwiftAsync();
11310 if (Args[i].IsSwiftError)
11311 Flags.setSwiftError();
11312 if (Args[i].IsCFGuardTarget)
11313 Flags.setCFGuardTarget();
11314 if (Args[i].IsByVal)
11316 if (Args[i].IsByRef)
11318 if (Args[i].IsPreallocated) {
11319 Flags.setPreallocated();
11327 if (Args[i].IsInAlloca) {
11328 Flags.setInAlloca();
11337 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11338 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11339 Flags.setByValSize(FrameSize);
11342 if (
auto MA = Args[i].Alignment)
11346 }
else if (
auto MA = Args[i].Alignment) {
11349 MemAlign = OriginalAlignment;
11351 Flags.setMemAlign(MemAlign);
11352 if (Args[i].IsNest)
11355 Flags.setInConsecutiveRegs();
11358 unsigned NumParts =
11363 if (Args[i].IsSExt)
11365 else if (Args[i].IsZExt)
11370 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11375 Args[i].Ty->getPointerAddressSpace())) &&
11376 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11389 CLI.
RetZExt == Args[i].IsZExt))
11390 Flags.setReturned();
11396 for (
unsigned j = 0; j != NumParts; ++j) {
11402 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11403 if (NumParts > 1 && j == 0)
11407 if (j == NumParts - 1)
11411 CLI.
Outs.push_back(MyFlags);
11412 CLI.
OutVals.push_back(Parts[j]);
11415 if (NeedsRegBlock &&
Value == NumValues - 1)
11416 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11428 "LowerCall didn't return a valid chain!");
11430 "LowerCall emitted a return value for a tail call!");
11432 "LowerCall didn't emit the correct number of values!");
11444 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11445 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11446 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11447 "LowerCall emitted a value with the wrong type!");
11457 unsigned NumValues = RetVTs.
size();
11458 ReturnValues.
resize(NumValues);
11465 for (
unsigned i = 0; i < NumValues; ++i) {
11472 DemoteStackIdx, Offsets[i]),
11474 ReturnValues[i] = L;
11475 Chains[i] = L.getValue(1);
11482 std::optional<ISD::NodeType> AssertOp;
11487 unsigned CurReg = 0;
11488 for (
EVT VT : RetVTs) {
11494 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11502 if (ReturnValues.
empty())
11508 return std::make_pair(Res, CLI.
Chain);
11525 if (
N->getNumValues() == 1) {
11533 "Lowering returned the wrong number of results!");
11536 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11550 "Copy from a reg to the same reg!");
11551 assert(!Reg.isPhysical() &&
"Is a physreg");
11557 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11562 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11563 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11564 ExtendType = PreferredExtendIt->second;
11567 PendingExports.push_back(Chain);
11579 return A->use_empty();
11581 const BasicBlock &Entry =
A->getParent()->front();
11582 for (
const User *U :
A->users())
11591 std::pair<const AllocaInst *, const StoreInst *>>;
11603 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11605 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11606 StaticAllocas.
reserve(NumArgs * 2);
11608 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11611 V = V->stripPointerCasts();
11613 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11616 return &Iter.first->second;
11633 if (
I.isDebugOrPseudoInst())
11637 for (
const Use &U :
I.operands()) {
11638 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11639 *
Info = StaticAllocaInfo::Clobbered;
11645 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11646 *
Info = StaticAllocaInfo::Clobbered;
11649 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11650 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11656 if (*
Info != StaticAllocaInfo::Unknown)
11664 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11666 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11670 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11671 ArgCopyElisionCandidates.count(Arg)) {
11672 *
Info = StaticAllocaInfo::Clobbered;
11676 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11680 *
Info = StaticAllocaInfo::Elidable;
11681 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11686 if (ArgCopyElisionCandidates.size() == NumArgs)
11710 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11711 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11712 const AllocaInst *AI = ArgCopyIter->second.first;
11713 int FixedIndex = FINode->getIndex();
11715 int OldIndex = AllocaIndex;
11719 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11725 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11726 "greater than stack argument alignment ("
11727 <<
DebugStr(RequiredAlignment) <<
" vs "
11735 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11736 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11742 AllocaIndex = FixedIndex;
11743 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11744 for (
SDValue ArgVal : ArgVals)
11748 const StoreInst *
SI = ArgCopyIter->second.second;
11761void SelectionDAGISel::LowerArguments(
const Function &
F) {
11762 SelectionDAG &DAG =
SDB->DAG;
11763 SDLoc dl =
SDB->getCurSDLoc();
11768 if (
F.hasFnAttribute(Attribute::Naked))
11773 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11775 ISD::ArgFlagsTy
Flags;
11777 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11778 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11788 ArgCopyElisionCandidates);
11791 for (
const Argument &Arg :
F.args()) {
11792 unsigned ArgNo = Arg.getArgNo();
11795 bool isArgValueUsed = !Arg.
use_empty();
11796 unsigned PartBase = 0;
11798 if (Arg.hasAttribute(Attribute::ByVal))
11799 FinalType = Arg.getParamByValType();
11800 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11801 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11802 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11805 EVT VT =
TLI->getValueType(
DL, ArgTy);
11806 ISD::ArgFlagsTy
Flags;
11809 Flags.setPointer();
11812 if (Arg.hasAttribute(Attribute::ZExt))
11814 if (Arg.hasAttribute(Attribute::SExt))
11816 if (Arg.hasAttribute(Attribute::InReg)) {
11823 Flags.setHvaStart();
11829 if (Arg.hasAttribute(Attribute::StructRet))
11831 if (Arg.hasAttribute(Attribute::SwiftSelf))
11832 Flags.setSwiftSelf();
11833 if (Arg.hasAttribute(Attribute::SwiftAsync))
11834 Flags.setSwiftAsync();
11835 if (Arg.hasAttribute(Attribute::SwiftError))
11836 Flags.setSwiftError();
11837 if (Arg.hasAttribute(Attribute::ByVal))
11839 if (Arg.hasAttribute(Attribute::ByRef))
11841 if (Arg.hasAttribute(Attribute::InAlloca)) {
11842 Flags.setInAlloca();
11850 if (Arg.hasAttribute(Attribute::Preallocated)) {
11851 Flags.setPreallocated();
11863 const Align OriginalAlignment(
11864 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11865 Flags.setOrigAlign(OriginalAlignment);
11868 Type *ArgMemTy =
nullptr;
11869 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11872 ArgMemTy = Arg.getPointeeInMemoryValueType();
11874 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11879 if (
auto ParamAlign = Arg.getParamStackAlign())
11880 MemAlign = *ParamAlign;
11881 else if ((ParamAlign = Arg.getParamAlign()))
11882 MemAlign = *ParamAlign;
11884 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11885 if (
Flags.isByRef())
11886 Flags.setByRefSize(MemSize);
11888 Flags.setByValSize(MemSize);
11889 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11890 MemAlign = *ParamAlign;
11892 MemAlign = OriginalAlignment;
11894 Flags.setMemAlign(MemAlign);
11896 if (Arg.hasAttribute(Attribute::Nest))
11899 Flags.setInConsecutiveRegs();
11900 if (ArgCopyElisionCandidates.count(&Arg))
11901 Flags.setCopyElisionCandidate();
11902 if (Arg.hasAttribute(Attribute::Returned))
11903 Flags.setReturned();
11905 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11906 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11907 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11908 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11909 for (
unsigned i = 0; i != NumRegs; ++i) {
11913 ISD::InputArg MyFlags(
11914 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11916 if (NumRegs > 1 && i == 0)
11917 MyFlags.Flags.setSplit();
11920 MyFlags.Flags.setOrigAlign(
Align(1));
11921 if (i == NumRegs - 1)
11922 MyFlags.Flags.setSplitEnd();
11926 if (NeedsRegBlock &&
Value == NumValues - 1)
11927 Ins[Ins.
size() - 1].Flags.setInConsecutiveRegsLast();
11934 SDValue NewRoot =
TLI->LowerFormalArguments(
11935 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11939 "LowerFormalArguments didn't return a valid chain!");
11941 "LowerFormalArguments didn't emit the correct number of values!");
11943 for (
unsigned i = 0, e = Ins.
size(); i != e; ++i) {
11945 "LowerFormalArguments emitted a null value!");
11947 "LowerFormalArguments emitted a value with the wrong type!");
11959 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11960 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11961 std::optional<ISD::NodeType> AssertOp;
11964 F.getCallingConv(), AssertOp);
11966 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11967 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11969 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11970 FuncInfo->DemoteRegister = SRetReg;
11972 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11980 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11981 for (
const Argument &Arg :
F.args()) {
11985 unsigned NumValues = ValueVTs.
size();
11986 if (NumValues == 0)
11993 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11994 unsigned NumParts = 0;
11995 for (EVT VT : ValueVTs)
11996 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
11997 F.getCallingConv(), VT);
12001 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12006 bool isSwiftErrorArg =
12007 TLI->supportSwiftError() &&
12008 Arg.hasAttribute(Attribute::SwiftError);
12009 if (!ArgHasUses && !isSwiftErrorArg) {
12010 SDB->setUnusedArgValue(&Arg, InVals[i]);
12013 if (FrameIndexSDNode *FI =
12015 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12018 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12019 EVT VT = ValueVTs[Val];
12020 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12021 F.getCallingConv(), VT);
12022 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12023 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12028 if (ArgHasUses || isSwiftErrorArg) {
12029 std::optional<ISD::NodeType> AssertOp;
12030 if (Arg.hasAttribute(Attribute::SExt))
12032 else if (Arg.hasAttribute(Attribute::ZExt))
12037 NewRoot,
F.getCallingConv(), AssertOp);
12040 if (NoFPClass !=
fcNone) {
12042 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12044 OutVal, SDNoFPClass);
12053 if (ArgValues.
empty())
12057 if (FrameIndexSDNode *FI =
12059 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12062 SDB->getCurSDLoc());
12064 SDB->setValue(&Arg, Res);
12074 if (LoadSDNode *LNode =
12076 if (FrameIndexSDNode *FI =
12078 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12106 FuncInfo->InitializeRegForValue(&Arg);
12107 SDB->CopyToExportRegsIfNeeded(&Arg);
12111 if (!Chains.
empty()) {
12118 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12122 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12123 for (MachineFunction::VariableDbgInfo &VI :
12124 MF->getInStackSlotVariableDbgInfo()) {
12125 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12126 if (
I != ArgCopyElisionFrameIndexMap.
end())
12127 VI.updateStackSlot(
I->second);
12142SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12143 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12145 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12151 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12155 if (!SuccsHandled.
insert(SuccMBB).second)
12163 for (
const PHINode &PN : SuccBB->phis()) {
12165 if (PN.use_empty())
12169 if (PN.getType()->isEmptyTy())
12173 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12178 RegOut =
FuncInfo.CreateRegs(&PN);
12196 "Didn't codegen value into a register!??");
12206 for (EVT VT : ValueVTs) {
12208 for (
unsigned i = 0; i != NumRegisters; ++i)
12210 Reg += NumRegisters;
12230void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12232 if (MaybeTC.
getNode() !=
nullptr)
12233 DAG.setRoot(MaybeTC);
12238void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12241 MachineFunction *CurMF =
FuncInfo.MF;
12242 MachineBasicBlock *NextMBB =
nullptr;
12247 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12249 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12251 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12259 CaseCluster &
Small = *
W.FirstCluster;
12260 CaseCluster &
Big = *
W.LastCluster;
12264 const APInt &SmallValue =
Small.Low->getValue();
12265 const APInt &BigValue =
Big.Low->getValue();
12268 APInt CommonBit = BigValue ^ SmallValue;
12275 DAG.getConstant(CommonBit,
DL, VT));
12277 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12283 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12285 addSuccessorWithProb(
12286 SwitchMBB, DefaultMBB,
12290 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12298 DAG.getBasicBlock(DefaultMBB));
12300 DAG.setRoot(BrCond);
12312 [](
const CaseCluster &a,
const CaseCluster &b) {
12313 return a.Prob != b.Prob ?
12315 a.Low->getValue().slt(b.Low->getValue());
12322 if (
I->Prob >
W.LastCluster->Prob)
12324 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12332 BranchProbability DefaultProb =
W.DefaultProb;
12333 BranchProbability UnhandledProbs = DefaultProb;
12335 UnhandledProbs +=
I->Prob;
12337 MachineBasicBlock *CurMBB =
W.MBB;
12339 bool FallthroughUnreachable =
false;
12340 MachineBasicBlock *Fallthrough;
12341 if (
I ==
W.LastCluster) {
12343 Fallthrough = DefaultMBB;
12348 CurMF->
insert(BBI, Fallthrough);
12352 UnhandledProbs -=
I->Prob;
12357 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12358 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12361 MachineBasicBlock *JumpMBB =
JT->MBB;
12362 CurMF->
insert(BBI, JumpMBB);
12364 auto JumpProb =
I->Prob;
12365 auto FallthroughProb = UnhandledProbs;
12373 if (*SI == DefaultMBB) {
12374 JumpProb += DefaultProb / 2;
12375 FallthroughProb -= DefaultProb / 2;
12393 if (FallthroughUnreachable) {
12400 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12401 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12407 JT->Default = Fallthrough;
12410 if (CurMBB == SwitchMBB) {
12418 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12421 for (BitTestCase &BTC : BTB->
Cases)
12433 BTB->
Prob += DefaultProb / 2;
12437 if (FallthroughUnreachable)
12441 if (CurMBB == SwitchMBB) {
12448 const Value *
RHS, *
LHS, *MHS;
12450 if (
I->Low ==
I->High) {
12465 if (FallthroughUnreachable)
12469 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12472 if (CurMBB == SwitchMBB)
12475 SL->SwitchCases.push_back(CB);
12480 CurMBB = Fallthrough;
12484void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12485 const SwitchWorkListItem &W,
12488 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12489 "Clusters not sorted?");
12490 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12492 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12493 SL->computeSplitWorkItemInfo(W);
12498 assert(PivotCluster >
W.FirstCluster);
12499 assert(PivotCluster <=
W.LastCluster);
12504 const ConstantInt *Pivot = PivotCluster->Low;
12513 MachineBasicBlock *LeftMBB;
12514 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12515 FirstLeft->Low ==
W.GE &&
12516 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12517 LeftMBB = FirstLeft->MBB;
12519 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12520 FuncInfo.MF->insert(BBI, LeftMBB);
12522 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12530 MachineBasicBlock *RightMBB;
12531 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12532 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12533 RightMBB = FirstRight->MBB;
12535 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12536 FuncInfo.MF->insert(BBI, RightMBB);
12538 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12544 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12547 if (
W.MBB == SwitchMBB)
12550 SL->SwitchCases.push_back(CB);
12575 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12583 unsigned PeeledCaseIndex = 0;
12584 bool SwitchPeeled =
false;
12585 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12586 CaseCluster &CC = Clusters[
Index];
12587 if (CC.
Prob < TopCaseProb)
12589 TopCaseProb = CC.
Prob;
12590 PeeledCaseIndex =
Index;
12591 SwitchPeeled =
true;
12596 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12597 << TopCaseProb <<
"\n");
12602 MachineBasicBlock *PeeledSwitchMBB =
12604 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12607 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12608 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12609 nullptr,
nullptr, TopCaseProb.
getCompl()};
12610 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12612 Clusters.erase(PeeledCaseIt);
12613 for (CaseCluster &CC : Clusters) {
12615 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12616 << CC.
Prob <<
"\n");
12620 PeeledCaseProb = TopCaseProb;
12621 return PeeledSwitchMBB;
12624void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12626 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12628 Clusters.reserve(
SI.getNumCases());
12629 for (
auto I :
SI.cases()) {
12630 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12631 const ConstantInt *CaseVal =
I.getCaseValue();
12632 BranchProbability Prob =
12634 : BranchProbability(1,
SI.getNumCases() + 1);
12638 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12647 MachineBasicBlock *PeeledSwitchMBB =
12648 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12651 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12652 if (Clusters.empty()) {
12653 assert(PeeledSwitchMBB == SwitchMBB);
12655 if (DefaultMBB != NextBlock(SwitchMBB)) {
12662 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12664 SL->findBitTestClusters(Clusters, &SI);
12667 dbgs() <<
"Case clusters: ";
12668 for (
const CaseCluster &
C : Clusters) {
12674 C.Low->getValue().print(
dbgs(),
true);
12675 if (
C.Low !=
C.High) {
12677 C.High->getValue().print(
dbgs(),
true);
12684 assert(!Clusters.empty());
12688 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12692 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12695 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12697 while (!WorkList.
empty()) {
12699 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12704 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12708 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12712void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12713 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12719void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12720 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12725 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12734 SmallVector<int, 8>
Mask;
12736 for (
unsigned i = 0; i != NumElts; ++i)
12737 Mask.push_back(NumElts - 1 - i);
12742void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12751 EVT OutVT = ValueVTs[0];
12755 for (
unsigned i = 0; i != Factor; ++i) {
12756 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12758 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12764 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12766 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12774 DAG.getVTList(ValueVTs), SubVecs);
12778void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12781 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12786 for (
unsigned i = 0; i < Factor; ++i) {
12789 "Expected VTs to be the same");
12807 for (
unsigned i = 0; i < Factor; ++i)
12814void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12818 unsigned NumValues = ValueVTs.
size();
12819 if (NumValues == 0)
return;
12824 for (
unsigned i = 0; i != NumValues; ++i)
12829 DAG.getVTList(ValueVTs), Values));
12832void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12833 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12845 DAG.getSignedConstant(
12852 uint64_t Idx = (NumElts +
Imm) % NumElts;
12855 SmallVector<int, 8>
Mask;
12856 for (
unsigned i = 0; i < NumElts; ++i)
12857 Mask.push_back(Idx + i);
12885 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12886 "start of copy chain MUST be COPY");
12887 Reg =
MI->getOperand(1).getReg();
12890 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12891 MI =
MRI.def_begin(
Reg)->getParent();
12894 if (
MI->getOpcode() == TargetOpcode::COPY) {
12895 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12896 Reg =
MI->getOperand(1).getReg();
12897 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12900 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12901 "end of copy chain MUST be INLINEASM_BR");
12911void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12917 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12918 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12919 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12927 for (
auto &
T : TargetConstraints) {
12928 SDISelAsmOperandInfo OpInfo(
T);
12936 switch (OpInfo.ConstraintType) {
12947 FuncInfo.MBB->addLiveIn(OriginalDef);
12955 ResultVTs.
push_back(OpInfo.ConstraintVT);
12964 ResultVTs.
push_back(OpInfo.ConstraintVT);
12972 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLowerinInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallBase &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type 'VT' ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
Used for dynamic register based calls (e.g. stackmap and patchpoint intrinsics).
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ VECTOR_FIND_LAST_ACTIVE
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or it has been determined that no operands can be NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)