Go to the documentation of this file.
110 using namespace llvm;
111 using namespace PatternMatch;
113 #define DEBUG_TYPE "isel"
115 STATISTIC(NumFastIselSuccessIndependent,
"Number of insts selected by "
116 "target-independent selector");
117 STATISTIC(NumFastIselSuccessTarget,
"Number of insts selected by "
118 "target-specific selector");
119 STATISTIC(NumFastIselDead,
"Number of dead insts removed on failure");
124 assert(LocalValueMap.empty() &&
125 "local values should be cleared after finishing a BB");
130 EmitStartPt =
nullptr;
131 if (!FuncInfo.MBB->empty())
132 EmitStartPt = &FuncInfo.MBB->back();
133 LastLocalValue = EmitStartPt;
139 if (!FuncInfo.CanLowerReturn)
144 if (!fastLowerArguments())
149 E = FuncInfo.Fn->arg_end();
152 assert(
VI != LocalValueMap.end() &&
"Missed an argument?");
153 FuncInfo.ValueMap[&*
I] =
VI->second;
168 RegDef = MO.getReg();
169 }
else if (MO.getReg().isVirtual()) {
180 if (
P.second == DefReg)
185 void FastISel::flushLocalValueMap() {
188 if (LastLocalValue != EmitStartPt) {
202 if (FuncInfo.RegsWithFixups.count(DefReg))
206 if (EmitStartPt == &LocalMI)
207 EmitStartPt = EmitStartPt->getPrevNode();
210 LocalMI.eraseFromParent();
214 if (FirstNonValue != FuncInfo.MBB->end()) {
227 if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
228 FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
232 LocalValueMap.clear();
233 LastLocalValue = EmitStartPt;
235 SavedInsertPt = FuncInfo.InsertPt;
248 if (!TLI.isTypeLegal(VT)) {
251 VT = TLI.getTypeToTransformTo(V->
getContext(), VT).getSimpleVT();
263 if (isa<Instruction>(V) &&
264 (!isa<AllocaInst>(V) ||
265 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
266 return FuncInfo.InitializeRegForValue(V);
268 SavePoint SaveInsertPt = enterLocalValueArea();
272 Reg = materializeRegForValue(V, VT);
274 leaveLocalValueArea(SaveInsertPt);
281 if (
const auto *CI = dyn_cast<ConstantInt>(V)) {
282 if (CI->getValue().getActiveBits() <= 64)
284 }
else if (isa<AllocaInst>(V))
285 Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
286 else if (isa<ConstantPointerNull>(V))
291 else if (
const auto *CF = dyn_cast<ConstantFP>(V)) {
292 if (CF->isNullValue())
293 Reg = fastMaterializeFloatZero(CF);
300 const APFloat &Flt = CF->getValueAPF();
301 EVT IntVT = TLI.getPointerTy(
DL);
303 APSInt SIntVal(IntBitWidth,
false);
314 }
else if (
const auto *
Op = dyn_cast<Operator>(V)) {
315 if (!selectOperator(
Op,
Op->getOpcode()))
316 if (!isa<Instruction>(
Op) ||
317 !fastSelectInstruction(cast<Instruction>(
Op)))
319 Reg = lookUpRegForValue(
Op);
320 }
else if (isa<UndefValue>(V)) {
321 Reg = createResultReg(TLI.getRegClassFor(VT));
322 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
323 TII.get(TargetOpcode::IMPLICIT_DEF),
Reg);
334 if (isa<Constant>(V))
335 Reg = fastMaterializeConstant(cast<Constant>(V));
340 Reg = materializeConstant(V, VT);
345 LocalValueMap[V] =
Reg;
357 if (
I != FuncInfo.ValueMap.end())
359 return LocalValueMap[V];
363 if (!isa<Instruction>(
I)) {
364 LocalValueMap[
I] =
Reg;
368 Register &AssignedReg = FuncInfo.ValueMap[
I];
372 else if (
Reg != AssignedReg) {
374 for (
unsigned i = 0;
i < NumRegs;
i++) {
375 FuncInfo.RegFixups[AssignedReg +
i] =
Reg +
i;
376 FuncInfo.RegsWithFixups.insert(
Reg +
i);
384 Register IdxN = getRegForValue(Idx);
390 MVT PtrVT = TLI.getPointerTy(
DL);
392 if (IdxVT.
bitsLT(PtrVT)) {
394 }
else if (IdxVT.
bitsGT(PtrVT)) {
402 if (getLastLocalValue()) {
403 FuncInfo.InsertPt = getLastLocalValue();
404 FuncInfo.MBB = FuncInfo.InsertPt->getParent();
407 FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
410 while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
417 assert(
I.isValid() &&
E.isValid() && std::distance(
I,
E) > 0 &&
418 "Invalid iterator!");
420 if (SavedInsertPt ==
I)
422 if (EmitStartPt ==
I)
423 EmitStartPt =
E.isValid() ? &*
E :
nullptr;
424 if (LastLocalValue ==
I)
425 LastLocalValue =
E.isValid() ? &*
E :
nullptr;
429 Dead->eraseFromParent();
436 SavePoint OldInsertPt = FuncInfo.InsertPt;
442 if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
443 LastLocalValue = &*std::prev(FuncInfo.InsertPt);
446 FuncInfo.InsertPt = OldInsertPt;
459 if (!TLI.isTypeLegal(VT)) {
464 VT = TLI.getTypeToTransformTo(
I->getContext(), VT);
471 if (
const auto *CI = dyn_cast<ConstantInt>(
I->getOperand(0)))
473 Register Op1 = getRegForValue(
I->getOperand(1));
478 fastEmit_ri_(VT.
getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
484 updateValueMap(
I, ResultReg);
488 Register Op0 = getRegForValue(
I->getOperand(0));
493 if (
const auto *CI = dyn_cast<ConstantInt>(
I->getOperand(1))) {
497 if (ISDOpcode ==
ISD::SDIV && isa<BinaryOperator>(
I) &&
504 if (ISDOpcode ==
ISD::UREM && isa<BinaryOperator>(
I) &&
516 updateValueMap(
I, ResultReg);
520 Register Op1 = getRegForValue(
I->getOperand(1));
526 ISDOpcode, Op0, Op1);
533 updateValueMap(
I, ResultReg);
538 Register N = getRegForValue(
I->getOperand(0));
544 if (isa<VectorType>(
I->getType()))
552 MVT VT = TLI.getPointerTy(
DL);
555 const Value *Idx = GTI.getOperand();
556 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
560 TotalOffs +=
DL.getStructLayout(StTy)->getElementOffset(
Field);
561 if (TotalOffs >= MaxOffs) {
562 N = fastEmit_ri_(VT,
ISD::ADD,
N, TotalOffs, VT);
569 Type *Ty = GTI.getIndexedType();
572 if (
const auto *CI = dyn_cast<ConstantInt>(Idx)) {
576 uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
577 TotalOffs +=
DL.getTypeAllocSize(Ty) * IdxN;
578 if (TotalOffs >= MaxOffs) {
579 N = fastEmit_ri_(VT,
ISD::ADD,
N, TotalOffs, VT);
587 N = fastEmit_ri_(VT,
ISD::ADD,
N, TotalOffs, VT);
594 uint64_t ElementSize =
DL.getTypeAllocSize(Ty);
595 Register IdxN = getRegForGEPIndex(Idx);
599 if (ElementSize != 1) {
600 IdxN = fastEmit_ri_(VT,
ISD::MUL, IdxN, ElementSize, VT);
610 N = fastEmit_ri_(VT,
ISD::ADD,
N, TotalOffs, VT);
616 updateValueMap(
I,
N);
621 const CallInst *CI,
unsigned StartIdx) {
622 for (
unsigned i = StartIdx,
e = CI->
arg_size();
i !=
e; ++
i) {
625 if (
const auto *
C = dyn_cast<ConstantInt>(Val)) {
628 }
else if (isa<ConstantPointerNull>(Val)) {
631 }
else if (
auto *AI = dyn_cast<AllocaInst>(Val)) {
635 auto SI = FuncInfo.StaticAllocaMap.find(AI);
636 if (
SI != FuncInfo.StaticAllocaMap.end())
653 assert(
I->getCalledFunction()->getReturnType()->isVoidTy() &&
654 "Stackmap cannot return a value.");
670 "Expected a constant integer.");
675 "Expected a constant integer.");
676 const auto *NumBytes =
690 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
691 for (
unsigned i = 0; ScratchRegs[
i]; ++
i)
693 ScratchRegs[
i],
true,
true,
false,
694 false,
false,
true));
697 unsigned AdjStackDown =
TII.getCallFrameSetupOpcode();
699 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackDown));
706 TII.get(TargetOpcode::STACKMAP));
707 for (
auto const &MO : Ops)
711 unsigned AdjStackUp =
TII.getCallFrameDestroyOpcode();
712 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackUp))
727 bool FastISel::lowerCallOperands(
const CallInst *CI,
unsigned ArgIdx,
728 unsigned NumArgs,
const Value *Callee,
729 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
731 Args.reserve(NumArgs);
734 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
742 Entry.setAttributes(CI, ArgI);
743 Args.push_back(Entry);
750 return lowerCallTo(CLI);
759 return setCallee(CC, ResultTy, Sym,
std::move(ArgsList), FixedArgs);
771 bool HasDef = !
I->getType()->isVoidTy();
776 "Expected a constant integer.");
777 const auto *NumArgsVal =
779 unsigned NumArgs = NumArgsVal->getZExtValue();
784 assert(
I->arg_size() >= NumMetaOpers + NumArgs &&
785 "Not enough arguments provided to the patchpoint intrinsic");
788 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
791 if (!lowerCallOperands(
I, NumMetaOpers, NumCallArgs,
Callee, IsAnyRegCC, CLI))
794 assert(CLI.
Call &&
"No call instruction specified.");
799 if (IsAnyRegCC && HasDef) {
808 "Expected a constant integer.");
813 "Expected a constant integer.");
814 const auto *NumBytes =
819 if (
const auto *
C = dyn_cast<IntToPtrInst>(
Callee)) {
821 cast<ConstantInt>(
C->getOperand(0))->getZExtValue();
823 }
else if (
const auto *
C = dyn_cast<ConstantExpr>(
Callee)) {
824 if (
C->getOpcode() == Instruction::IntToPtr) {
826 cast<ConstantInt>(
C->getOperand(0))->getZExtValue();
830 }
else if (
const auto *GV = dyn_cast<GlobalValue>(
Callee)) {
832 }
else if (isa<ConstantPointerNull>(
Callee))
839 unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.
OutRegs.size();
848 for (
unsigned i = NumMetaOpers,
e = NumMetaOpers + NumArgs;
i !=
e; ++
i) {
869 const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
870 for (
unsigned i = 0; ScratchRegs[
i]; ++
i)
872 ScratchRegs[
i],
true,
true,
false,
873 false,
false,
true));
882 TII.get(TargetOpcode::PATCHPOINT));
893 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
901 const auto &
Triple =
TM.getTargetTriple();
910 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
911 TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
920 const auto &
Triple =
TM.getTargetTriple();
931 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
932 TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
945 Attrs.push_back(Attribute::SExt);
947 Attrs.push_back(Attribute::ZExt);
949 Attrs.push_back(Attribute::InReg);
961 return lowerCallTo(CI, Sym, NumArgs);
970 Args.reserve(NumArgs);
974 for (
unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
982 Entry.setAttributes(CI, ArgI);
983 Args.push_back(Entry);
990 return lowerCallTo(CLI);
1002 bool CanLowerReturn = TLI.CanLowerReturn(
1006 if (!CanLowerReturn)
1009 for (
unsigned I = 0,
E = RetTys.size();
I !=
E; ++
I) {
1013 for (
unsigned i = 0;
i != NumRegs; ++
i) {
1015 MyFlags.
VT = RegisterVT;
1024 CLI.
Ins.push_back(MyFlags);
1033 FinalType =
Arg.IndirectType;
1034 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1046 if (
Arg.IsSwiftSelf)
1048 if (
Arg.IsSwiftAsync)
1050 if (
Arg.IsSwiftError)
1052 if (
Arg.IsCFGuardTarget)
1056 if (
Arg.IsInAlloca) {
1065 if (
Arg.IsPreallocated) {
1075 if (
Arg.IsByVal ||
Arg.IsInAlloca ||
Arg.IsPreallocated) {
1076 unsigned FrameSize =
DL.getTypeAllocSize(
Arg.IndirectType);
1081 MemAlign =
Align(TLI.getByValTypeAlignment(
Arg.IndirectType,
DL));
1083 }
else if (!MemAlign) {
1084 MemAlign =
DL.getABITypeAlign(
Arg.Ty);
1096 if (!fastLowerCall(CLI))
1100 assert(CLI.
Call &&
"No call instruction specified.");
1133 Entry.setAttributes(CI,
i - CI->
arg_begin());
1134 Args.push_back(Entry);
1142 if (IsTailCall && MF->getFunction()
1143 .getFnAttribute(
"disable-tail-calls")
1153 return lowerCallTo(CLI);
1157 const CallInst *Call = cast<CallInst>(
I);
1160 if (
const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
1162 if (!IA->getConstraintString().empty())
1165 unsigned ExtraInfo = 0;
1166 if (IA->hasSideEffects())
1168 if (IA->isAlignStack())
1170 if (Call->isConvergent())
1179 const MDNode *SrcLoc = Call->getMetadata(
"srcloc");
1187 if (
const auto *II = dyn_cast<IntrinsicInst>(Call))
1188 return selectIntrinsicCall(II);
1190 return lowerCall(Call);
1198 case Intrinsic::lifetime_start:
1199 case Intrinsic::lifetime_end:
1201 case Intrinsic::donothing:
1203 case Intrinsic::sideeffect:
1205 case Intrinsic::assume:
1207 case Intrinsic::experimental_noalias_scope_decl:
1209 case Intrinsic::dbg_declare: {
1212 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1214 <<
" (!hasDebugInfo)\n");
1219 if (!Address || isa<UndefValue>(Address)) {
1221 <<
" (bad/undef address)\n");
1228 dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
1229 if (
Arg && FuncInfo.getArgumentFrameIndex(
Arg) != INT_MAX)
1247 if (!
Op && !Address->use_empty() && isa<Instruction>(Address) &&
1248 (!isa<AllocaInst>(Address) ||
1249 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1255 "Expected inlined-at fields to agree");
1259 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1260 TII.get(TargetOpcode::DBG_VALUE),
true, *
Op,
1266 if (UseInstrRefDebugInfo &&
Op->isReg()) {
1267 Builder->setDesc(
TII.get(TargetOpcode::DBG_INSTR_REF));
1268 Builder->getOperand(1).ChangeToImmediate(0);
1277 <<
" (no materialized reg for address)\n");
1281 case Intrinsic::dbg_value: {
1287 "Expected inlined-at fields to agree");
1288 if (!V || isa<UndefValue>(V) || DI->
hasArgList()) {
1291 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
false, 0U,
1293 }
else if (
const auto *CI = dyn_cast<ConstantInt>(V)) {
1298 if (CI->getBitWidth() > 64)
1299 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1305 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1306 .
addImm(CI->getZExtValue())
1310 }
else if (
const auto *CF = dyn_cast<ConstantFP>(V)) {
1311 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1318 bool IsIndirect =
false;
1320 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect,
Reg,
1325 if (UseInstrRefDebugInfo) {
1326 Builder->setDesc(
TII.get(TargetOpcode::DBG_INSTR_REF));
1327 Builder->getOperand(1).ChangeToImmediate(0);
1335 case Intrinsic::dbg_label: {
1338 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1343 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1347 case Intrinsic::objectsize:
1350 case Intrinsic::is_constant:
1353 case Intrinsic::launder_invariant_group:
1354 case Intrinsic::strip_invariant_group:
1355 case Intrinsic::expect: {
1359 updateValueMap(II, ResultReg);
1362 case Intrinsic::experimental_stackmap:
1363 return selectStackmap(II);
1364 case Intrinsic::experimental_patchpoint_void:
1365 case Intrinsic::experimental_patchpoint_i64:
1366 return selectPatchpoint(II);
1368 case Intrinsic::xray_customevent:
1369 return selectXRayCustomEvent(II);
1370 case Intrinsic::xray_typedevent:
1371 return selectXRayTypedEvent(II);
1374 return fastLowerIntrinsicCall(II);
1378 EVT SrcVT = TLI.getValueType(
DL,
I->getOperand(0)->getType());
1379 EVT DstVT = TLI.getValueType(
DL,
I->getType());
1387 if (!TLI.isTypeLegal(DstVT))
1391 if (!TLI.isTypeLegal(SrcVT))
1394 Register InputReg = getRegForValue(
I->getOperand(0));
1404 updateValueMap(
I, ResultReg);
1409 EVT SrcEVT = TLI.getValueType(
DL,
I->getOperand(0)->getType());
1410 EVT DstEVT = TLI.getValueType(
DL,
I->getType());
1412 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1418 Register Op0 = getRegForValue(
I->getOperand(0));
1423 if (SrcVT == DstVT) {
1424 updateValueMap(
I, Op0);
1433 updateValueMap(
I, ResultReg);
1443 EVT ETy = TLI.getValueType(
DL,
I->getOperand(0)->getType());
1444 if (ETy ==
MVT::Other || !TLI.isTypeLegal(ETy))
1450 Register ResultReg = createResultReg(TyRegClass);
1451 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1454 updateValueMap(
I, ResultReg);
1460 void FastISel::removeDeadLocalValueCode(
MachineInstr *SavedLastLocalValue)
1463 if (CurLastLocalValue != SavedLastLocalValue) {
1468 if (SavedLastLocalValue)
1471 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1472 setLastLocalValue(SavedLastLocalValue);
1473 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1481 flushLocalValueMap();
1483 MachineInstr *SavedLastLocalValue = getLastLocalValue();
1486 if (
I->isTerminator()) {
1487 if (!handlePHINodesInSuccessorBlocks(
I->getParent())) {
1492 removeDeadLocalValueCode(SavedLastLocalValue);
1498 if (
auto *Call = dyn_cast<CallBase>(
I))
1499 for (
unsigned i = 0,
e = Call->getNumOperandBundles();
i !=
e; ++
i)
1503 DbgLoc =
I->getDebugLoc();
1505 SavedInsertPt = FuncInfo.InsertPt;
1507 if (
const auto *Call = dyn_cast<CallInst>(
I)) {
1508 const Function *
F = Call->getCalledFunction();
1513 if (
F && !
F->hasLocalLinkage() &&
F->hasName() &&
1514 LibInfo->getLibFunc(
F->getName(), Func) &&
1515 LibInfo->hasOptimizedCodeGen(Func))
1519 if (
F &&
F->getIntrinsicID() == Intrinsic::trap &&
1520 Call->hasFnAttr(
"trap-func-name"))
1525 if (!SkipTargetIndependentISel) {
1526 if (selectOperator(
I,
I->getOpcode())) {
1527 ++NumFastIselSuccessIndependent;
1532 recomputeInsertPt();
1533 if (SavedInsertPt != FuncInfo.InsertPt)
1534 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1535 SavedInsertPt = FuncInfo.InsertPt;
1538 if (fastSelectInstruction(
I)) {
1539 ++NumFastIselSuccessTarget;
1544 recomputeInsertPt();
1545 if (SavedInsertPt != FuncInfo.InsertPt)
1546 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1550 if (
I->isTerminator()) {
1553 removeDeadLocalValueCode(SavedLastLocalValue);
1554 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1563 if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
1564 FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1578 FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1587 if (TrueMBB != FalseMBB) {
1590 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->
getBasicBlock());
1593 FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1596 fastEmitBranch(FalseMBB, DbgLoc);
1606 EVT VT = TLI.getValueType(
DL,
I->getType());
1610 updateValueMap(
I, ResultReg);
1619 if (!TLI.isTypeLegal(IntVT))
1627 Register IntResultReg = fastEmit_ri_(
1638 updateValueMap(
I, ResultReg);
1649 EVT RealVT = TLI.getValueType(
DL, EVI->
getType(),
true);
1653 if (!TLI.isTypeLegal(VT) && VT !=
MVT::i1)
1662 if (
I != FuncInfo.ValueMap.end())
1663 ResultReg =
I->second;
1664 else if (isa<Instruction>(Op0))
1665 ResultReg = FuncInfo.InitializeRegForValue(Op0);
1675 for (
unsigned i = 0;
i < VTIndex;
i++)
1676 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[
i]);
1678 updateValueMap(EVI, ResultReg);
1686 case Instruction::FAdd:
1688 case Instruction::Sub:
1690 case Instruction::FSub:
1694 case Instruction::FMul:
1696 case Instruction::SDiv:
1698 case Instruction::UDiv:
1700 case Instruction::FDiv:
1702 case Instruction::SRem:
1704 case Instruction::URem:
1706 case Instruction::FRem:
1708 case Instruction::Shl:
1710 case Instruction::LShr:
1712 case Instruction::AShr:
1714 case Instruction::And:
1716 case Instruction::Or:
1718 case Instruction::Xor:
1721 case Instruction::FNeg:
1722 return selectFNeg(
I,
I->getOperand(0));
1724 case Instruction::GetElementPtr:
1725 return selectGetElementPtr(
I);
1727 case Instruction::Br: {
1742 case Instruction::Unreachable:
1743 if (
TM.Options.TrapUnreachable)
1748 case Instruction::Alloca:
1750 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(
I)))
1763 if (
TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(
I))
1765 return selectCall(
I);
1767 case Instruction::BitCast:
1768 return selectBitCast(
I);
1770 case Instruction::FPToSI:
1772 case Instruction::ZExt:
1774 case Instruction::SExt:
1776 case Instruction::Trunc:
1778 case Instruction::SIToFP:
1781 case Instruction::IntToPtr:
1782 case Instruction::PtrToInt: {
1783 EVT SrcVT = TLI.getValueType(
DL,
I->getOperand(0)->getType());
1784 EVT DstVT = TLI.getValueType(
DL,
I->getType());
1792 updateValueMap(
I,
Reg);
1796 case Instruction::ExtractValue:
1797 return selectExtractValue(
I);
1799 case Instruction::Freeze:
1800 return selectFreeze(
I);
1802 case Instruction::PHI:
1813 bool SkipTargetIndependentISel)
1814 : FuncInfo(FuncInfo), MF(FuncInfo.MF),
MRI(FuncInfo.MF->getRegInfo()),
1815 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1816 TM(FuncInfo.MF->getTarget()),
DL(MF->getDataLayout()),
1817 TII(*MF->getSubtarget().getInstrInfo()),
1818 TLI(*MF->getSubtarget().getTargetLowering()),
1819 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1820 SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1893 return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
1902 if (
Op.isVirtual()) {
1971 unsigned Op1,
unsigned Op2) {
2105 "Cannot yet extract from physregs");
2109 ResultReg).
addReg(Op0, 0, Idx);
2125 bool FastISel::handlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
2135 if (!isa<PHINode>(SuccBB->
begin()))
2141 if (!SuccsHandled.
insert(SuccMBB).second)
2169 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2174 if (
const auto *Inst = dyn_cast<Instruction>(PHIOp))
2175 DbgLoc = Inst->getDebugLoc();
2192 "tryToFoldLoad expected a LoadInst with a single use");
2196 unsigned MaxUsers = 6;
2199 while (TheUser != FoldInst &&
2212 if (TheUser != FoldInst)
2253 if (!isa<AddOperator>(Add))
2260 if (isa<Instruction>(Add) &&
2264 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2275 if (
const auto *LI = dyn_cast<LoadInst>(
I)) {
2276 Alignment = LI->getAlign();
2279 Ptr = LI->getPointerOperand();
2280 ValTy = LI->getType();
2281 }
else if (
const auto *
SI = dyn_cast<StoreInst>(
I)) {
2282 Alignment =
SI->getAlign();
2285 Ptr =
SI->getPointerOperand();
2286 ValTy =
SI->getValueOperand()->getType();
2290 bool IsNonTemporal =
I->hasMetadata(LLVMContext::MD_nontemporal);
2291 bool IsInvariant =
I->hasMetadata(LLVMContext::MD_invariant_load);
2292 bool IsDereferenceable =
I->hasMetadata(LLVMContext::MD_dereferenceable);
2293 const MDNode *Ranges =
I->getMetadata(LLVMContext::MD_range);
2306 if (IsDereferenceable)
2312 *Alignment, AAInfo, Ranges);
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
virtual const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const
Returns the largest legal sub-class of RC that supports the sub-register index Idx.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
DIExpression * getExpression() const
This class represents an incoming formal argument to a Function.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
This is an optimization pass for GlobalISel generic memory operations.
Value * getAddress() const
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs)
bool selectPatchpoint(const CallInst *I)
SmallVector< ISD::ArgFlagsTy, 16 > OutFlags
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
A parsed version of the target data layout string in and methods for querying it.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Register getRegForValue(const Value *V)
Create a virtual register and arrange for it to be assigned the value for the given LLVM value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode, const ConstantFP *FPImm)
This method is called by target-independent code to request that an instruction with the given type,...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
bool selectCast(const User *I, unsigned Opcode)
Context object for machine code objects.
const MachineInstrBuilder & add(const MachineOperand &MO) const
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
void updateValueMap(const Value *I, Register Reg, unsigned NumRegs=1)
Update the value map to include the new mapping for this instruction, or insert an extra copy to get ...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
void setMemAlign(Align A)
Target - Wrapper for Target specific information.
Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm, MVT ImmType)
This method is a wrapper of fastEmit_ri.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Register fastEmitInst_f(unsigned MachineInstOpcode, const TargetRegisterClass *RC, const ConstantFP *FPImm)
Emit a MachineInstr with a floating point immediate, and a result register in the given register clas...
CallLoweringInfo & setIsPatchPoint(bool Value=true)
FastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo, bool SkipTargetIndependentISel=false)
SmallVector< Register, 4 > InRegs
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
void finishBasicBlock()
Flush the local value map.
DenseSet< Register > RegsWithFixups
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
bool lowerArguments()
Do "fast" instruction selection for function arguments and append the machine instructions to the cur...
Register fastEmitZExtFromI1(MVT VT, unsigned Op0)
Emit MachineInstrs to compute the value of Op with all but the least significant bit set to zero.
Reg
All possible values of the reg field in the ModR/M byte.
@ MOInvariant
The memory access always returns the same value (or traps).
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
bool selectFreeze(const User *I)
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
SavePoint enterLocalValueArea()
Prepare InsertPt to begin inserting instructions into the local value area and return the old insert ...
Triple - Helper class for working with autoconf configuration names.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ ICMP_SGT
signed greater than
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
SmallVector< Value *, 16 > OutVals
The instances of the Type class are immutable: once they are created, they are never changed.
MachineBasicBlock * MBB
MBB - The current block.
void diagnoseDontCall(const CallInst &CI)
Register fastEmitInst_rii(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm1, uint64_t Imm2)
Emit a MachineInstr with one register operand and two immediate operands.
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, unsigned Op1)
This method is called by target-independent code to request that an instruction with the given type,...
A description of a memory reference used in the backend.
FunctionType * getFunctionType() const
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
DILocalVariable * getVariable() const
const TargetLowering & TLI
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
bool isOSLinux() const
Tests whether the OS is Linux.
@ ICMP_SLE
signed less or equal
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
gep_type_iterator gep_type_begin(const User *GEP)
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
unsigned const TargetRegisterInfo * TRI
Register getRegForGEPIndex(const Value *Idx)
This is a wrapper around getRegForValue that also takes care of truncating or sign-extending the give...
unsigned getOperandNo() const
getOperandNo - Return the operand # of this MachineOperand in its MachineInstr.
LLVM Basic Block Representation.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
@ INLINEASM
INLINEASM - Represents an inline asm block.
gep_type_iterator gep_type_end(const User *GEP)
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
@ FCMP_ULT
1 1 0 0 True if unordered or less than
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void setHasStackMap(bool s=true)
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
FunctionLoweringInfo & FuncInfo
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
This represents the llvm.dbg.value instruction.
static MachineOperand CreateImm(int64_t Val)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Register createResultReg(const TargetRegisterClass *RC)
(vector float) vec_cmpeq(*A, *B) C
@ ICMP_ULE
unsigned less or equal
iterator begin()
Instruction iterator methods.
void leaveLocalValueArea(SavePoint Old)
Reset InsertPt to the given old insert position.
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
bool selectXRayTypedEvent(const CallInst *II)
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
const HexagonInstrInfo * TII
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Describe properties that are true of each instruction in the target description file.
MachineOperand class - Representation of each machine instruction operand.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode)
This method is called by target-independent code to request that an instruction with the given type a...
Class to represent integer types.
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
An arbitrary precision integer that knows its signedness.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
bool selectStackmap(const CallInst *I)
STATISTIC(NumFunctions, "Total number of functions")
ConstantFP - Floating Point Values [float, double].
static bool isCommutative(Instruction *I)
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
Insert branch code into the end of the specified MachineBasicBlock.
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
static MachineOperand CreateFI(int Idx)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
BasicBlock * getSuccessor(unsigned Idx) const
Return the specified successor. This instruction must be a terminator.
MachineRegisterInfo & MRI
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ AND
Bitwise operators - logical and, logical or, logical xor.
ArchType getArch() const
Get the parsed architecture type of this triple.
CallingConv::ID getCallingConv() const
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
MachineMemOperand * createMachineMemOperandFor(const Instruction *I) const
Create a machine mem operand from the given instruction.
unsigned OrigNumPHINodesToUpdate
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
This class is the base class for the comparison instructions.
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II)
This method is called by target-independent code to do target-specific intrinsic lowering.
@ FADD
Simple binary floating point operators.
bool selectInstruction(const Instruction *I)
Do "fast" instruction selection for the given LLVM IR instruction and append the generated machine in...
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Representation of each machine instruction.
virtual bool tryToFoldLoadIntoMI(MachineInstr *, unsigned, const LoadInst *)
The specified machine instr operand is a vreg, and that vreg is being provided by the specified load ...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
This represents the llvm.dbg.declare instruction.
This represents the llvm.dbg.label instruction.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
virtual bool fastLowerCall(CallLoweringInfo &CLI)
This method is called by target-independent code to do target-specific call lowering.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm)
This method is called by target-independent code to request that an instruction with the given type,...
void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc)
Emit an unconditional branch to the given block, unless it is the immediate (fall-through) successor,...
DILabel * getLabel() const
@ TRAP
TRAP - Trapping instruction.
This class contains a discriminated union of information about pointers in memory operands,...
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
bool selectIntrinsicCall(const IntrinsicInst *II)
static Register findLocalRegDef(MachineInstr &MI)
Return the defined register if this instruction defines exactly one virtual register and uses no othe...
Register fastEmitInst_rri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, uint64_t Imm)
Emit a MachineInstr with two register operands, an immediate, and a result register in the given regi...
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
void setOrigAlign(Align A)
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Flags
Flags values. These may be or'd together.
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0)
This method is called by target-independent code to request that an instruction with the given type,...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void setInConsecutiveRegs(bool Flag=true)
StandardInstrumentations SI(Debug, VerifyEach)
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Register fastEmitInst_(unsigned MachineInstOpcode, const TargetRegisterClass *RC)
Emit a MachineInstr with no operands and a result register in the given register class.
@ ICMP_UGE
unsigned greater or equal
@ MONonTemporal
The memory access is non-temporal.
virtual bool fastLowerArguments()
This method is called by target-independent code to do target-specific argument lowering.
TargetLoweringBase::ArgListTy ArgListTy
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isUnconditional() const
void setByValSize(unsigned S)
@ ICMP_SLT
signed less than
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
bool selectOperator(const User *I, unsigned Opcode)
Do "fast" instruction selection for the given LLVM IR operator (Instruction or ConstantExpr),...
MachineInstrBundleIterator< MachineInstr > iterator
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Class to represent struct types.
StringRef - Represent a constant reference to a string, i.e.
MachineBasicBlock MachineBasicBlock::iterator MBBI
@ ICMP_ULT
unsigned less than
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool canFoldAddIntoGEP(const User *GEP, const Value *Add)
Check if Add is an add that can be safely folded into GEP.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
constexpr char IsVolatile[]
Key for Kernel::Arg::Metadata::mIsVolatile.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
An instruction for reading from memory.
static bool isRegUsedByPhiNodes(Register DefReg, FunctionLoweringInfo &FuncInfo)
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
unsigned const MachineRegisterInfo * MRI
Register fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, uint64_t Imm)
Emit a MachineInstr with a register operand, an immediate, and a result register in the given registe...
Wrapper class representing virtual and physical registers.
amdgpu Simplify well known AMD library false FunctionCallee Callee
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
reg_iterator reg_begin(Register RegNo) const
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst)
We're checking to see if we can fold LI into FoldInst.
static constexpr roundingMode rmTowardZero
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool lowerCall(const CallInst *I)
bool selectExtractValue(const User *U)
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
SmallVector< Register, 16 > OutRegs
bool selectFNeg(const User *I, const Value *In)
Emit an FNeg operation.
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
unsigned arg_size() const
std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
Provides information about what library functions are available for the current target.
bool selectXRayCustomEvent(const CallInst *II)
@ ICMP_SGE
signed greater or equal
Value * getCalledOperand() const
@ MOStore
The memory access writes data.
@ ADD
Simple integer binary arithmetic operators.
A wrapper class for inspecting calls to intrinsic functions.
static Type * getVoidTy(LLVMContext &C)
SmallVector< ISD::InputArg, 4 > Ins
bool selectGetElementPtr(const User *I)
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
@ SHL
Shift and rotation operations.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
static MachineOperand CreateRegMask(const uint32_t *Mask)
CreateRegMask - Creates a register mask operand referencing Mask.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
@ ICMP_UGT
unsigned greater than
Value * getValue(unsigned OpIdx=0) const
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Value * getArgOperand(unsigned i) const
Register fastEmitInst_rrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1, unsigned Op2)
Emit a MachineInstr with three register operands and a result register in the given register class.
const BasicBlock * getParent() const
void removeDeadCode(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E)
Remove all dead instructions between the I and E.
Predicate getPredicate() const
Return the predicate for this instruction.
CallLoweringInfo & setTailCall(bool Value=true)
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reverse_iterator rend(StringRef path)
Get reverse end iterator over path.
const char LLVMTargetMachineRef TM
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This class represents a function call, abstracting a target machine's calling convention.
const TargetRegisterInfo & TRI
@ FNEG
Perform various unary floating-point operations inspired by libm.
bool selectCall(const User *I)
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
Select the AArch64 opcode for the basic binary operation GenericOpc (such as G_OR or G_SDIV),...
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Value * getOperand(unsigned i) const
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, const CallBase &Call)
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool selectBitCast(const User *I)
Conditional or Unconditional Branch instruction.
@ SIGN_EXTEND
Conversion operators.
const TargetInstrInfo & TII
Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx)
Emit a MachineInstr for an extract_subreg from a specified index of a superregister to a specified ty...
Register lookUpRegForValue(const Value *V)
Look up the value to see if its value is already cached in a register.
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB)
Emit an unconditional branch to FalseMBB, obtains the branch weight and adds TrueMBB and FalseMBB to ...
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, unsigned Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
LLVM Value Representation.
bool selectBinaryOp(const User *I, unsigned ISDOpcode)
Select and emit code for a binary operator instruction, which has an opcode which directly correspond...
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function.
void recomputeInsertPt()
Reset InsertPt to prepare for inserting instructions into the current block.
void startNewBlock()
Set the current block to which generated machine instructions will be appended.
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
BasicBlock * getSuccessor(unsigned i) const
Register constrainOperandRegClass(const MCInstrDesc &II, Register Op, unsigned OpNum)
Try to constrain Op so that it is usable by argument OpNum of the provided MCInstrDesc.
Register fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0)
Emit a MachineInstr with one register operand and a result register in the given register class.
Register fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm)
Emit a MachineInstr with a single immediate operand, and a result register in the given register clas...
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Class to represent function types.
const MCPhysReg * ImplicitDefs
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.