66#include "llvm/IR/IntrinsicsAMDGPU.h"
96#define DEBUG_TYPE "irtranslator"
102 cl::desc(
"Should enable CSE in irtranslator"),
120 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
124 if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
125 R << (
" (in function: " + MF.getName() +
")").str();
127 if (TPC.isGlobalISelAbortEnabled())
144 DILocationVerifier() =
default;
145 ~DILocationVerifier() =
default;
147 const Instruction *getCurrentInst()
const {
return CurrInst; }
148 void setCurrentInst(
const Instruction *Inst) { CurrInst = Inst; }
155 assert(getCurrentInst() &&
"Inserted instruction without a current MI");
160 <<
" was copied to " <<
MI);
166 (
MI.getParent()->isEntryBlock() && !
MI.getDebugLoc()) ||
167 (
MI.isDebugInstr())) &&
168 "Line info was not transferred to all instructions");
191IRTranslator::allocateVRegs(
const Value &Val) {
192 auto VRegsIt = VMap.findVRegs(Val);
193 if (VRegsIt != VMap.vregs_end())
194 return *VRegsIt->second;
195 auto *Regs = VMap.getVRegs(Val);
196 auto *Offsets = VMap.getOffsets(Val);
199 Offsets->empty() ? Offsets :
nullptr);
200 for (
unsigned i = 0; i < SplitTys.
size(); ++i)
206 auto VRegsIt = VMap.findVRegs(Val);
207 if (VRegsIt != VMap.vregs_end())
208 return *VRegsIt->second;
211 return *VMap.getVRegs(Val);
214 auto *VRegs = VMap.getVRegs(Val);
215 auto *Offsets = VMap.getOffsets(Val);
219 "Don't know how to create an empty vreg");
223 Offsets->empty() ? Offsets :
nullptr);
225 if (!isa<Constant>(Val)) {
226 for (
auto Ty : SplitTys)
233 auto &
C = cast<Constant>(Val);
235 while (
auto Elt =
C.getAggregateElement(
Idx++)) {
236 auto EltRegs = getOrCreateVRegs(*Elt);
237 llvm::copy(EltRegs, std::back_inserter(*VRegs));
240 assert(SplitTys.size() == 1 &&
"unexpectedly split LLT");
242 bool Success = translate(cast<Constant>(Val), VRegs->front());
247 R <<
"unable to translate constant: " <<
ore::NV(
"Type", Val.
getType());
256int IRTranslator::getOrCreateFrameIndex(
const AllocaInst &AI) {
257 auto MapEntry = FrameIndices.find(&AI);
258 if (MapEntry != FrameIndices.end())
259 return MapEntry->second;
263 ElementSize * cast<ConstantInt>(AI.
getArraySize())->getZExtValue();
266 Size = std::max<uint64_t>(
Size, 1u);
268 int &FI = FrameIndices[&AI];
274 if (
const StoreInst *SI = dyn_cast<StoreInst>(&
I))
275 return SI->getAlign();
276 if (
const LoadInst *LI = dyn_cast<LoadInst>(&
I))
277 return LI->getAlign();
284 R <<
"unable to translate memop: " <<
ore::NV(
"Opcode", &
I);
291 assert(
MBB &&
"BasicBlock was not encountered before");
296 assert(NewPred &&
"new predecessor must be a real MachineBasicBlock");
297 MachinePreds[Edge].push_back(NewPred);
300bool IRTranslator::translateBinaryOp(
unsigned Opcode,
const User &U,
306 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
307 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
310 if (isa<Instruction>(U)) {
319bool IRTranslator::translateUnaryOp(
unsigned Opcode,
const User &U,
321 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
324 if (isa<Instruction>(U)) {
333 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
336bool IRTranslator::translateCompare(
const User &U,
338 auto *CI = cast<CmpInst>(&U);
339 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
340 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
344 MIRBuilder.
buildICmp(Pred, Res, Op0, Op1);
355 MIRBuilder.
buildFCmp(Pred, Res, Op0, Op1, Flags);
369 VRegs = getOrCreateVRegs(*Ret);
380 return CLI->
lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
383void IRTranslator::emitBranchForMergedCondition(
389 if (
const CmpInst *BOp = dyn_cast<CmpInst>(
Cond)) {
392 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
395 Condition = InvertCond ?
FC->getInversePredicate() :
FC->getPredicate();
399 BOp->getOperand(1),
nullptr,
TBB, FBB, CurBB,
400 CurBuilder->getDebugLoc(), TProb, FProb);
401 SL->SwitchCases.push_back(CB);
409 nullptr,
TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
410 SL->SwitchCases.push_back(CB);
415 return I->getParent() == BB;
419void IRTranslator::findMergedConditions(
424 using namespace PatternMatch;
425 assert((Opc == Instruction::And || Opc == Instruction::Or) &&
426 "Expected Opc to be AND/OR");
432 findMergedConditions(NotCond,
TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
438 const Value *BOpOp0, *BOpOp1;
452 if (BOpc == Instruction::And)
453 BOpc = Instruction::Or;
454 else if (BOpc == Instruction::Or)
455 BOpc = Instruction::And;
461 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->
hasOneUse();
465 emitBranchForMergedCondition(
Cond,
TBB, FBB, CurBB, SwitchBB, TProb, FProb,
476 if (Opc == Instruction::Or) {
497 auto NewTrueProb = TProb / 2;
498 auto NewFalseProb = TProb / 2 + FProb;
500 findMergedConditions(BOpOp0,
TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
501 NewFalseProb, InvertCond);
507 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
508 Probs[1], InvertCond);
510 assert(Opc == Instruction::And &&
"Unknown merge op!");
530 auto NewTrueProb = TProb + FProb / 2;
531 auto NewFalseProb = FProb / 2;
533 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
534 NewFalseProb, InvertCond);
540 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
541 Probs[1], InvertCond);
545bool IRTranslator::shouldEmitAsBranches(
546 const std::vector<SwitchCG::CaseBlock> &Cases) {
548 if (Cases.size() != 2)
553 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
554 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
555 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
556 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
562 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
563 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
564 isa<Constant>(Cases[0].CmpRHS) &&
565 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
567 Cases[0].TrueBB == Cases[1].ThisBB)
570 Cases[0].FalseBB == Cases[1].ThisBB)
578 const BranchInst &BrInst = cast<BranchInst>(U);
579 auto &CurMBB = MIRBuilder.
getMBB();
585 !CurMBB.isLayoutSuccessor(Succ0MBB))
590 CurMBB.addSuccessor(&getMBB(*Succ));
616 using namespace PatternMatch;
617 const Instruction *CondI = dyn_cast<Instruction>(CondVal);
619 !BrInst.
hasMetadata(LLVMContext::MD_unpredictable)) {
622 const Value *BOp0, *BOp1;
624 Opcode = Instruction::And;
626 Opcode = Instruction::Or;
630 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
631 getEdgeProbability(&CurMBB, Succ0MBB),
632 getEdgeProbability(&CurMBB, Succ1MBB),
634 assert(SL->SwitchCases[0].ThisBB == &CurMBB &&
"Unexpected lowering!");
637 if (shouldEmitAsBranches(SL->SwitchCases)) {
639 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
640 SL->SwitchCases.erase(SL->SwitchCases.begin());
646 for (
unsigned I = 1, E = SL->SwitchCases.size();
I != E; ++
I)
647 MF->
erase(SL->SwitchCases[
I].ThisBB);
649 SL->SwitchCases.clear();
656 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
657 CurBuilder->getDebugLoc());
661 emitSwitchCase(CB, &CurMBB, *CurBuilder);
669 Src->addSuccessorWithoutProb(Dst);
673 Prob = getEdgeProbability(Src, Dst);
674 Src->addSuccessor(Dst, Prob);
680 const BasicBlock *SrcBB = Src->getBasicBlock();
681 const BasicBlock *DstBB = Dst->getBasicBlock();
685 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
692 using namespace SwitchCG;
697 Clusters.reserve(
SI.getNumCases());
698 for (
const auto &
I :
SI.cases()) {
700 assert(Succ &&
"Could not find successor mbb in mapping");
705 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
718 if (Clusters.empty()) {
725 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB,
nullptr,
nullptr);
726 SL->findBitTestClusters(Clusters, &SI);
729 dbgs() <<
"Case clusters: ";
730 for (
const CaseCluster &
C : Clusters) {
731 if (
C.Kind == CC_JumpTable)
733 if (
C.Kind == CC_BitTests)
736 C.Low->getValue().print(
dbgs(),
true);
737 if (
C.Low !=
C.High) {
739 C.High->getValue().print(
dbgs(),
true);
746 assert(!Clusters.empty());
750 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
751 WorkList.push_back({SwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
753 while (!WorkList.empty()) {
754 SwitchWorkListItem
W = WorkList.pop_back_val();
756 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
758 if (NumClusters > 3 &&
761 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB, MIB);
765 if (!lowerSwitchWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
775 using namespace SwitchCG;
776 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
777 "Clusters not sorted?");
778 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
780 auto [LastLeft, FirstRight, LeftProb, RightProb] =
781 SL->computeSplitWorkItemInfo(W);
786 assert(PivotCluster >
W.FirstCluster);
787 assert(PivotCluster <=
W.LastCluster);
802 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
803 FirstLeft->Low ==
W.GE &&
804 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
805 LeftMBB = FirstLeft->MBB;
810 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
817 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
W.LT &&
818 (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
819 RightMBB = FirstRight->MBB;
824 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
832 if (
W.MBB == SwitchMBB)
833 emitSwitchCase(CB, SwitchMBB, MIB);
835 SL->SwitchCases.push_back(CB);
841 assert(
JT.Reg != -1U &&
"Should lower JT Header first!");
863 Register SwitchOpReg = getOrCreateVReg(SValue);
865 auto Sub = MIB.
buildSub({SwitchTy}, SwitchOpReg, FirstCst);
873 JT.Reg = Sub.getReg(0);
884 auto Cst = getOrCreateVReg(
921 const auto *CI = dyn_cast<ConstantInt>(CB.
CmpRHS);
939 "Can only handle SLE ranges");
945 if (cast<ConstantInt>(CB.
CmpLHS)->isMinValue(
true)) {
951 auto Sub = MIB.
buildSub({CmpTy}, CmpOpReg, CondLHS);
986 bool FallthroughUnreachable) {
987 using namespace SwitchCG;
990 JumpTableHeader *JTH = &SL->JTCases[
I->JTCasesIndex].first;
996 CurMF->
insert(BBI, JumpMBB);
1006 auto JumpProb =
I->Prob;
1007 auto FallthroughProb = UnhandledProbs;
1015 if (*SI == DefaultMBB) {
1016 JumpProb += DefaultProb / 2;
1017 FallthroughProb -= DefaultProb / 2;
1022 addMachineCFGPred({SwitchMBB->
getBasicBlock(), (*SI)->getBasicBlock()},
1027 if (FallthroughUnreachable)
1028 JTH->FallthroughUnreachable =
true;
1030 if (!JTH->FallthroughUnreachable)
1031 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1032 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1037 JTH->HeaderBB = CurMBB;
1038 JT->Default = Fallthrough;
1041 if (CurMBB == SwitchMBB) {
1042 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1044 JTH->Emitted =
true;
1051 bool FallthroughUnreachable,
1056 using namespace SwitchCG;
1059 if (
I->Low ==
I->High) {
1075 CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS,
I->MBB, Fallthrough,
1078 emitSwitchCase(CB, SwitchMBB, MIB);
1088 Register SwitchOpReg = getOrCreateVReg(*
B.SValue);
1092 auto RangeSub = MIB.
buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1097 LLT MaskTy = SwitchOpTy;
1103 for (
unsigned I = 0, E =
B.Cases.size();
I != E; ++
I) {
1113 if (SwitchOpTy != MaskTy)
1121 if (!
B.FallthroughUnreachable)
1122 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
1123 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
1127 if (!
B.FallthroughUnreachable) {
1131 RangeSub, RangeCst);
1151 if (PopCount == 1) {
1154 auto MaskTrailingZeros =
1159 }
else if (PopCount == BB.
Range) {
1161 auto MaskTrailingOnes =
1168 auto SwitchVal = MIB.
buildShl(SwitchTy, CstOne, Reg);
1172 auto AndOp = MIB.
buildAnd(SwitchTy, SwitchVal, CstMask);
1179 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
1181 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1199bool IRTranslator::lowerBitTestWorkItem(
1205 bool FallthroughUnreachable) {
1206 using namespace SwitchCG;
1209 BitTestBlock *BTB = &SL->BitTestCases[
I->BTCasesIndex];
1211 for (BitTestCase &BTC : BTB->Cases)
1212 CurMF->
insert(BBI, BTC.ThisBB);
1215 BTB->Parent = CurMBB;
1216 BTB->Default = Fallthrough;
1218 BTB->DefaultProb = UnhandledProbs;
1222 if (!BTB->ContiguousRange) {
1223 BTB->Prob += DefaultProb / 2;
1224 BTB->DefaultProb -= DefaultProb / 2;
1227 if (FallthroughUnreachable)
1228 BTB->FallthroughUnreachable =
true;
1231 if (CurMBB == SwitchMBB) {
1232 emitBitTestHeader(*BTB, SwitchMBB);
1233 BTB->Emitted =
true;
1243 using namespace SwitchCG;
1247 if (++BBI != FuncInfo.
MF->
end())
1256 [](
const CaseCluster &a,
const CaseCluster &b) {
1257 return a.Prob != b.Prob
1259 : a.Low->getValue().slt(b.Low->getValue());
1264 for (CaseClusterIt
I =
W.LastCluster;
I >
W.FirstCluster;) {
1266 if (
I->Prob >
W.LastCluster->Prob)
1268 if (
I->Kind == CC_Range &&
I->MBB == NextMBB) {
1278 for (CaseClusterIt
I =
W.FirstCluster;
I <=
W.LastCluster; ++
I)
1279 UnhandledProbs +=
I->Prob;
1282 for (CaseClusterIt
I =
W.FirstCluster, E =
W.LastCluster;
I <= E; ++
I) {
1283 bool FallthroughUnreachable =
false;
1285 if (
I ==
W.LastCluster) {
1287 Fallthrough = DefaultMBB;
1288 FallthroughUnreachable = isa<UnreachableInst>(
1292 CurMF->
insert(BBI, Fallthrough);
1294 UnhandledProbs -=
I->Prob;
1298 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1299 DefaultProb, UnhandledProbs,
I, Fallthrough,
1300 FallthroughUnreachable)) {
1308 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1309 UnhandledProbs,
I, Fallthrough,
1310 FallthroughUnreachable)) {
1317 if (!lowerSwitchRangeWorkItem(
I,
Cond, Fallthrough,
1318 FallthroughUnreachable, UnhandledProbs,
1319 CurMBB, MIB, SwitchMBB)) {
1326 CurMBB = Fallthrough;
1332bool IRTranslator::translateIndirectBr(
const User &U,
1346 if (!AddedSuccessors.
insert(Succ).second)
1355 if (
auto Arg = dyn_cast<Argument>(V))
1356 return Arg->hasSwiftErrorAttr();
1357 if (
auto AI = dyn_cast<AllocaInst>(V))
1363 const LoadInst &LI = cast<LoadInst>(U);
1378 assert(Regs.
size() == 1 &&
"swifterror should be single pointer");
1396 for (
unsigned i = 0; i < Regs.
size(); ++i) {
1401 Align BaseAlign = getMemOpAlign(LI);
1425 assert(Vals.
size() == 1 &&
"swifterror should be single pointer");
1428 SI.getPointerOperand());
1435 for (
unsigned i = 0; i < Vals.
size(); ++i) {
1440 Align BaseAlign = getMemOpAlign(SI);
1444 SI.getSyncScopeID(),
SI.getOrdering());
1451 const Value *Src = U.getOperand(0);
1457 Indices.
push_back(ConstantInt::get(Int32Ty, 0));
1460 for (
auto Idx : EVI->indices())
1462 }
else if (
const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1463 for (
auto Idx : IVI->indices())
1466 for (
unsigned i = 1; i < U.getNumOperands(); ++i)
1471 DL.getIndexedOffsetInType(Src->getType(), Indices));
1474bool IRTranslator::translateExtractValue(
const User &U,
1476 const Value *Src =
U.getOperand(0);
1481 auto &DstRegs = allocateVRegs(U);
1483 for (
unsigned i = 0; i < DstRegs.size(); ++i)
1484 DstRegs[i] = SrcRegs[
Idx++];
1489bool IRTranslator::translateInsertValue(
const User &U,
1491 const Value *Src =
U.getOperand(0);
1493 auto &DstRegs = allocateVRegs(U);
1497 auto *InsertedIt = InsertedRegs.
begin();
1499 for (
unsigned i = 0; i < DstRegs.size(); ++i) {
1500 if (DstOffsets[i] >=
Offset && InsertedIt != InsertedRegs.
end())
1501 DstRegs[i] = *InsertedIt++;
1503 DstRegs[i] = SrcRegs[i];
1509bool IRTranslator::translateSelect(
const User &U,
1511 Register Tst = getOrCreateVReg(*
U.getOperand(0));
1517 if (
const SelectInst *SI = dyn_cast<SelectInst>(&U))
1520 for (
unsigned i = 0; i < ResRegs.
size(); ++i) {
1521 MIRBuilder.
buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1527bool IRTranslator::translateCopy(
const User &U,
const Value &V,
1530 auto &Regs = *VMap.getVRegs(U);
1532 Regs.push_back(Src);
1533 VMap.getOffsets(U)->push_back(0);
1542bool IRTranslator::translateBitCast(
const User &U,
1549 if (isa<ConstantInt>(
U.getOperand(0)))
1550 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1552 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
1555 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1558bool IRTranslator::translateCast(
unsigned Opcode,
const User &U,
1560 if (
U.getType()->getScalarType()->isBFloatTy() ||
1561 U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
1574bool IRTranslator::translateGetElementPtr(
const User &U,
1576 Value &Op0 = *
U.getOperand(0);
1577 Register BaseReg = getOrCreateVReg(Op0);
1589 unsigned VectorWidth = 0;
1593 bool WantSplatVector =
false;
1594 if (
auto *VT = dyn_cast<VectorType>(
U.getType())) {
1595 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1597 WantSplatVector = VectorWidth > 1;
1602 if (WantSplatVector && !PtrTy.
isVector()) {
1603 BaseReg = MIRBuilder
1616 const Value *
Idx = GTI.getOperand();
1617 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
1618 unsigned Field = cast<Constant>(
Idx)->getUniqueInteger().getZExtValue();
1622 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1626 if (
const auto *CI = dyn_cast<ConstantInt>(
Idx)) {
1627 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1628 Offset += ElementSize * *Val;
1635 BaseReg = MIRBuilder.
buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1642 if (IdxTy != OffsetTy) {
1643 if (!IdxTy.
isVector() && WantSplatVector) {
1656 if (ElementSize != 1) {
1662 GepOffsetReg = IdxReg;
1672 if (int64_t(
Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
1675 MIRBuilder.
buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1680 MIRBuilder.
buildCopy(getOrCreateVReg(U), BaseReg);
1684bool IRTranslator::translateMemFunc(
const CallInst &CI,
1689 if (isa<UndefValue>(SrcPtr))
1694 unsigned MinPtrSize = UINT_MAX;
1695 for (
auto AI = CI.
arg_begin(), AE = CI.
arg_end(); std::next(AI) != AE; ++AI) {
1696 Register SrcReg = getOrCreateVReg(**AI);
1699 MinPtrSize = std::min<unsigned>(SrcTy.
getSizeInBits(), MinPtrSize);
1707 if (MRI->
getType(SizeOpReg) != SizeTy)
1721 if (
auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1722 DstAlign = MCI->getDestAlign().valueOrOne();
1723 SrcAlign = MCI->getSourceAlign().valueOrOne();
1724 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1725 }
else if (
auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1726 DstAlign = MCI->getDestAlign().valueOrOne();
1727 SrcAlign = MCI->getSourceAlign().valueOrOne();
1728 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1729 }
else if (
auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1730 DstAlign = MMI->getDestAlign().valueOrOne();
1731 SrcAlign = MMI->getSourceAlign().valueOrOne();
1732 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1734 auto *MSI = cast<MemSetInst>(&CI);
1735 DstAlign = MSI->getDestAlign().valueOrOne();
1738 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1754 if (AA && CopySize &&
1765 ICall.addMemOperand(
1767 StoreFlags, 1, DstAlign, AAInfo));
1768 if (Opcode != TargetOpcode::G_MEMSET)
1775bool IRTranslator::translateTrap(
const CallInst &CI,
1780 if (TrapFuncName.
empty()) {
1781 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1791 if (Opcode == TargetOpcode::G_UBSANTRAP)
1798 return CLI->
lowerCall(MIRBuilder, Info);
1801bool IRTranslator::translateVectorInterleave2Intrinsic(
1804 "This function can only be called on the interleave2 intrinsic!");
1808 Register Res = getOrCreateVReg(CI);
1817bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1820 "This function can only be called on the deinterleave2 intrinsic!");
1836void IRTranslator::getStackGuard(
Register DstReg,
1841 MIRBuilder.
buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1847 unsigned AddrSpace =
Global->getType()->getPointerAddressSpace();
1855 MIB.setMemRefs({
MemRef});
1858bool IRTranslator::translateOverflowIntrinsic(
const CallInst &CI,
unsigned Op,
1862 Op, {ResRegs[0], ResRegs[1]},
1868bool IRTranslator::translateFixedPointIntrinsic(
unsigned Op,
const CallInst &CI,
1870 Register Dst = getOrCreateVReg(CI);
1874 MIRBuilder.
buildInstr(
Op, {Dst}, { Src0, Src1, Scale });
1882 case Intrinsic::acos:
1883 return TargetOpcode::G_FACOS;
1884 case Intrinsic::asin:
1885 return TargetOpcode::G_FASIN;
1886 case Intrinsic::atan:
1887 return TargetOpcode::G_FATAN;
1888 case Intrinsic::bswap:
1889 return TargetOpcode::G_BSWAP;
1890 case Intrinsic::bitreverse:
1891 return TargetOpcode::G_BITREVERSE;
1892 case Intrinsic::fshl:
1893 return TargetOpcode::G_FSHL;
1894 case Intrinsic::fshr:
1895 return TargetOpcode::G_FSHR;
1896 case Intrinsic::ceil:
1897 return TargetOpcode::G_FCEIL;
1898 case Intrinsic::cos:
1899 return TargetOpcode::G_FCOS;
1900 case Intrinsic::cosh:
1901 return TargetOpcode::G_FCOSH;
1902 case Intrinsic::ctpop:
1903 return TargetOpcode::G_CTPOP;
1904 case Intrinsic::exp:
1905 return TargetOpcode::G_FEXP;
1906 case Intrinsic::exp2:
1907 return TargetOpcode::G_FEXP2;
1908 case Intrinsic::exp10:
1909 return TargetOpcode::G_FEXP10;
1910 case Intrinsic::fabs:
1911 return TargetOpcode::G_FABS;
1912 case Intrinsic::copysign:
1913 return TargetOpcode::G_FCOPYSIGN;
1914 case Intrinsic::minnum:
1915 return TargetOpcode::G_FMINNUM;
1916 case Intrinsic::maxnum:
1917 return TargetOpcode::G_FMAXNUM;
1918 case Intrinsic::minimum:
1919 return TargetOpcode::G_FMINIMUM;
1920 case Intrinsic::maximum:
1921 return TargetOpcode::G_FMAXIMUM;
1922 case Intrinsic::canonicalize:
1923 return TargetOpcode::G_FCANONICALIZE;
1924 case Intrinsic::floor:
1925 return TargetOpcode::G_FFLOOR;
1926 case Intrinsic::fma:
1927 return TargetOpcode::G_FMA;
1928 case Intrinsic::log:
1929 return TargetOpcode::G_FLOG;
1930 case Intrinsic::log2:
1931 return TargetOpcode::G_FLOG2;
1932 case Intrinsic::log10:
1933 return TargetOpcode::G_FLOG10;
1934 case Intrinsic::ldexp:
1935 return TargetOpcode::G_FLDEXP;
1936 case Intrinsic::nearbyint:
1937 return TargetOpcode::G_FNEARBYINT;
1938 case Intrinsic::pow:
1939 return TargetOpcode::G_FPOW;
1940 case Intrinsic::powi:
1941 return TargetOpcode::G_FPOWI;
1942 case Intrinsic::rint:
1943 return TargetOpcode::G_FRINT;
1944 case Intrinsic::round:
1945 return TargetOpcode::G_INTRINSIC_ROUND;
1946 case Intrinsic::roundeven:
1947 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1948 case Intrinsic::sin:
1949 return TargetOpcode::G_FSIN;
1950 case Intrinsic::sinh:
1951 return TargetOpcode::G_FSINH;
1952 case Intrinsic::sqrt:
1953 return TargetOpcode::G_FSQRT;
1954 case Intrinsic::tan:
1955 return TargetOpcode::G_FTAN;
1956 case Intrinsic::tanh:
1957 return TargetOpcode::G_FTANH;
1958 case Intrinsic::trunc:
1959 return TargetOpcode::G_INTRINSIC_TRUNC;
1960 case Intrinsic::readcyclecounter:
1961 return TargetOpcode::G_READCYCLECOUNTER;
1962 case Intrinsic::readsteadycounter:
1963 return TargetOpcode::G_READSTEADYCOUNTER;
1964 case Intrinsic::ptrmask:
1965 return TargetOpcode::G_PTRMASK;
1966 case Intrinsic::lrint:
1967 return TargetOpcode::G_INTRINSIC_LRINT;
1968 case Intrinsic::llrint:
1969 return TargetOpcode::G_INTRINSIC_LLRINT;
1971 case Intrinsic::vector_reduce_fmin:
1972 return TargetOpcode::G_VECREDUCE_FMIN;
1973 case Intrinsic::vector_reduce_fmax:
1974 return TargetOpcode::G_VECREDUCE_FMAX;
1975 case Intrinsic::vector_reduce_fminimum:
1976 return TargetOpcode::G_VECREDUCE_FMINIMUM;
1977 case Intrinsic::vector_reduce_fmaximum:
1978 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
1979 case Intrinsic::vector_reduce_add:
1980 return TargetOpcode::G_VECREDUCE_ADD;
1981 case Intrinsic::vector_reduce_mul:
1982 return TargetOpcode::G_VECREDUCE_MUL;
1983 case Intrinsic::vector_reduce_and:
1984 return TargetOpcode::G_VECREDUCE_AND;
1985 case Intrinsic::vector_reduce_or:
1986 return TargetOpcode::G_VECREDUCE_OR;
1987 case Intrinsic::vector_reduce_xor:
1988 return TargetOpcode::G_VECREDUCE_XOR;
1989 case Intrinsic::vector_reduce_smax:
1990 return TargetOpcode::G_VECREDUCE_SMAX;
1991 case Intrinsic::vector_reduce_smin:
1992 return TargetOpcode::G_VECREDUCE_SMIN;
1993 case Intrinsic::vector_reduce_umax:
1994 return TargetOpcode::G_VECREDUCE_UMAX;
1995 case Intrinsic::vector_reduce_umin:
1996 return TargetOpcode::G_VECREDUCE_UMIN;
1997 case Intrinsic::experimental_vector_compress:
1998 return TargetOpcode::G_VECTOR_COMPRESS;
1999 case Intrinsic::lround:
2000 return TargetOpcode::G_LROUND;
2001 case Intrinsic::llround:
2002 return TargetOpcode::G_LLROUND;
2003 case Intrinsic::get_fpenv:
2004 return TargetOpcode::G_GET_FPENV;
2005 case Intrinsic::get_fpmode:
2006 return TargetOpcode::G_GET_FPMODE;
2011bool IRTranslator::translateSimpleIntrinsic(
const CallInst &CI,
2015 unsigned Op = getSimpleIntrinsicOpcode(
ID);
2023 for (
const auto &Arg : CI.
args())
2026 MIRBuilder.
buildInstr(
Op, {getOrCreateVReg(CI)}, VRegs,
2034 case Intrinsic::experimental_constrained_fadd:
2035 return TargetOpcode::G_STRICT_FADD;
2036 case Intrinsic::experimental_constrained_fsub:
2037 return TargetOpcode::G_STRICT_FSUB;
2038 case Intrinsic::experimental_constrained_fmul:
2039 return TargetOpcode::G_STRICT_FMUL;
2040 case Intrinsic::experimental_constrained_fdiv:
2041 return TargetOpcode::G_STRICT_FDIV;
2042 case Intrinsic::experimental_constrained_frem:
2043 return TargetOpcode::G_STRICT_FREM;
2044 case Intrinsic::experimental_constrained_fma:
2045 return TargetOpcode::G_STRICT_FMA;
2046 case Intrinsic::experimental_constrained_sqrt:
2047 return TargetOpcode::G_STRICT_FSQRT;
2048 case Intrinsic::experimental_constrained_ldexp:
2049 return TargetOpcode::G_STRICT_FLDEXP;
2055bool IRTranslator::translateConstrainedFPIntrinsic(
2075std::optional<MCRegister> IRTranslator::getArgPhysReg(
Argument &Arg) {
2076 auto VRegs = getOrCreateVRegs(Arg);
2077 if (VRegs.
size() != 1)
2078 return std::nullopt;
2082 if (!VRegDef || !VRegDef->isCopy())
2083 return std::nullopt;
2087bool IRTranslator::translateIfEntryValueArgument(
bool isDeclare,
Value *Val,
2092 auto *Arg = dyn_cast<Argument>(Val);
2099 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2101 LLVM_DEBUG(
dbgs() <<
"Dropping dbg." << (isDeclare ?
"declare" :
"value")
2102 <<
": expression is entry_value but "
2103 <<
"couldn't find a physical register\n");
2123 case Intrinsic::experimental_convergence_anchor:
2124 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2125 case Intrinsic::experimental_convergence_entry:
2126 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2127 case Intrinsic::experimental_convergence_loop:
2128 return TargetOpcode::CONVERGENCECTRL_LOOP;
2132bool IRTranslator::translateConvergenceControlIntrinsic(
2135 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2138 if (
ID == Intrinsic::experimental_convergence_loop) {
2140 assert(Bundle &&
"Expected a convergence control token.");
2142 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2151 if (
auto *
MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
2152 if (ORE->enabled()) {
2162 if (translateSimpleIntrinsic(CI,
ID, MIRBuilder))
2168 case Intrinsic::lifetime_start:
2169 case Intrinsic::lifetime_end: {
2174 unsigned Op =
ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2175 : TargetOpcode::LIFETIME_END;
2184 for (
const Value *V : Allocas) {
2185 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
2196 case Intrinsic::dbg_declare: {
2203 case Intrinsic::dbg_label: {
2209 "Expected inlined-at fields to agree");
2214 case Intrinsic::vaend:
2218 case Intrinsic::vastart: {
2223 MIRBuilder.
buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*
Ptr)})
2226 ListSize, Alignment));
2229 case Intrinsic::dbg_assign:
2236 case Intrinsic::dbg_value: {
2243 case Intrinsic::uadd_with_overflow:
2244 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2245 case Intrinsic::sadd_with_overflow:
2246 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2247 case Intrinsic::usub_with_overflow:
2248 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2249 case Intrinsic::ssub_with_overflow:
2250 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2251 case Intrinsic::umul_with_overflow:
2252 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2253 case Intrinsic::smul_with_overflow:
2254 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2255 case Intrinsic::uadd_sat:
2256 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2257 case Intrinsic::sadd_sat:
2258 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2259 case Intrinsic::usub_sat:
2260 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2261 case Intrinsic::ssub_sat:
2262 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2263 case Intrinsic::ushl_sat:
2264 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2265 case Intrinsic::sshl_sat:
2266 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2267 case Intrinsic::umin:
2268 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2269 case Intrinsic::umax:
2270 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2271 case Intrinsic::smin:
2272 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2273 case Intrinsic::smax:
2274 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2275 case Intrinsic::abs:
2277 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2278 case Intrinsic::smul_fix:
2279 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2280 case Intrinsic::umul_fix:
2281 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2282 case Intrinsic::smul_fix_sat:
2283 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2284 case Intrinsic::umul_fix_sat:
2285 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2286 case Intrinsic::sdiv_fix:
2287 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2288 case Intrinsic::udiv_fix:
2289 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2290 case Intrinsic::sdiv_fix_sat:
2291 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2292 case Intrinsic::udiv_fix_sat:
2293 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2294 case Intrinsic::fmuladd: {
2296 Register Dst = getOrCreateVReg(CI);
2305 MIRBuilder.
buildFMA(Dst, Op0, Op1, Op2,
2316 case Intrinsic::convert_from_fp16:
2322 case Intrinsic::convert_to_fp16:
2328 case Intrinsic::frexp: {
2335 case Intrinsic::memcpy_inline:
2336 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2337 case Intrinsic::memcpy:
2338 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2339 case Intrinsic::memmove:
2340 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2341 case Intrinsic::memset:
2342 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2343 case Intrinsic::eh_typeid_for: {
2350 case Intrinsic::objectsize:
2353 case Intrinsic::is_constant:
2356 case Intrinsic::stackguard:
2357 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2359 case Intrinsic::stackprotector: {
2364 getStackGuard(GuardVal, MIRBuilder);
2369 int FI = getOrCreateFrameIndex(*Slot);
2373 GuardVal, getOrCreateVReg(*Slot),
2380 case Intrinsic::stacksave: {
2381 MIRBuilder.
buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2384 case Intrinsic::stackrestore: {
2385 MIRBuilder.
buildInstr(TargetOpcode::G_STACKRESTORE, {},
2389 case Intrinsic::cttz:
2390 case Intrinsic::ctlz: {
2392 bool isTrailing =
ID == Intrinsic::cttz;
2393 unsigned Opcode = isTrailing
2394 ? Cst->
isZero() ? TargetOpcode::G_CTTZ
2395 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2396 : Cst->
isZero() ? TargetOpcode::G_CTLZ
2397 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2398 MIRBuilder.
buildInstr(Opcode, {getOrCreateVReg(CI)},
2402 case Intrinsic::invariant_start: {
2408 case Intrinsic::invariant_end:
2410 case Intrinsic::expect:
2411 case Intrinsic::annotation:
2412 case Intrinsic::ptr_annotation:
2413 case Intrinsic::launder_invariant_group:
2414 case Intrinsic::strip_invariant_group: {
2416 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2420 case Intrinsic::assume:
2421 case Intrinsic::experimental_noalias_scope_decl:
2422 case Intrinsic::var_annotation:
2423 case Intrinsic::sideeffect:
2426 case Intrinsic::read_volatile_register:
2427 case Intrinsic::read_register: {
2430 .
buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2431 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2434 case Intrinsic::write_register: {
2436 MIRBuilder.
buildInstr(TargetOpcode::G_WRITE_REGISTER)
2437 .
addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2441 case Intrinsic::localescape: {
2449 if (isa<ConstantPointerNull>(Arg))
2452 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2467 case Intrinsic::vector_reduce_fadd:
2468 case Intrinsic::vector_reduce_fmul: {
2471 Register Dst = getOrCreateVReg(CI);
2477 Opc =
ID == Intrinsic::vector_reduce_fadd
2478 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2479 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2480 MIRBuilder.
buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2487 if (
ID == Intrinsic::vector_reduce_fadd) {
2488 Opc = TargetOpcode::G_VECREDUCE_FADD;
2489 ScalarOpc = TargetOpcode::G_FADD;
2491 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2492 ScalarOpc = TargetOpcode::G_FMUL;
2497 MIRBuilder.
buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2502 case Intrinsic::trap:
2503 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2504 case Intrinsic::debugtrap:
2505 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2506 case Intrinsic::ubsantrap:
2507 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2508 case Intrinsic::allow_runtime_check:
2509 case Intrinsic::allow_ubsan_check:
2510 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2513 case Intrinsic::amdgcn_cs_chain:
2514 return translateCallBase(CI, MIRBuilder);
2515 case Intrinsic::fptrunc_round: {
2520 std::optional<RoundingMode> RoundMode =
2525 .
buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2526 {getOrCreateVReg(CI)},
2528 .addImm((
int)*RoundMode);
2532 case Intrinsic::is_fpclass: {
2537 .
buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2538 {getOrCreateVReg(*FpValue)})
2543 case Intrinsic::set_fpenv: {
2548 case Intrinsic::reset_fpenv:
2551 case Intrinsic::set_fpmode: {
2556 case Intrinsic::reset_fpmode:
2559 case Intrinsic::vscale: {
2563 case Intrinsic::scmp:
2564 MIRBuilder.
buildSCmp(getOrCreateVReg(CI),
2568 case Intrinsic::ucmp:
2569 MIRBuilder.
buildUCmp(getOrCreateVReg(CI),
2573 case Intrinsic::prefetch: {
2575 unsigned RW = cast<ConstantInt>(CI.
getOperand(1))->getZExtValue();
2576 unsigned Locality = cast<ConstantInt>(CI.
getOperand(2))->getZExtValue();
2577 unsigned CacheType = cast<ConstantInt>(CI.
getOperand(3))->getZExtValue();
2589 case Intrinsic::vector_interleave2:
2590 case Intrinsic::vector_deinterleave2: {
2598 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2600 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2603#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2604 case Intrinsic::INTRINSIC:
2605#include "llvm/IR/ConstrainedOps.def"
2606 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2608 case Intrinsic::experimental_convergence_anchor:
2609 case Intrinsic::experimental_convergence_entry:
2610 case Intrinsic::experimental_convergence_loop:
2611 return translateConvergenceControlIntrinsic(CI,
ID, MIRBuilder);
2616bool IRTranslator::translateInlineAsm(
const CallBase &CB,
2623 dbgs() <<
"Inline asm lowering is not supported for this target yet\n");
2628 MIRBuilder, CB, [&](
const Value &Val) {
return getOrCreateVRegs(Val); });
2631bool IRTranslator::translateCallBase(
const CallBase &CB,
2638 for (
const auto &Arg : CB.
args()) {
2640 assert(SwiftInVReg == 0 &&
"Expected only one swift error argument");
2644 &CB, &MIRBuilder.
getMBB(), Arg));
2650 Args.push_back(getOrCreateVRegs(*Arg));
2653 if (
auto *CI = dyn_cast<CallInst>(&CB)) {
2654 if (ORE->enabled()) {
2662 std::optional<CallLowering::PtrAuthInfo> PAI;
2667 const Value *
Key = Bundle->Inputs[0];
2674 if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
2675 !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
2677 Register DiscReg = getOrCreateVReg(*Discriminator);
2685 const auto &Token = *Bundle->Inputs[0].get();
2686 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2693 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2698 assert(!HasTailCall &&
"Can't tail call return twice from block?");
2707 const CallInst &CI = cast<CallInst>(U);
2713 if (
F && (
F->hasDLLImportStorageClass() ||
2715 F->hasExternalWeakLinkage())))
2723 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2727 return translateInlineAsm(CI, MIRBuilder);
2732 if (
F &&
F->isIntrinsic()) {
2733 ID =
F->getIntrinsicID();
2739 return translateCallBase(CI, MIRBuilder);
2743 if (translateKnownIntrinsic(CI,
ID, MIRBuilder))
2748 ResultRegs = getOrCreateVRegs(CI);
2753 if (isa<FPMathOperator>(CI))
2760 if (
ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2763 assert(CI->getBitWidth() <= 64 &&
2764 "large intrinsic immediates not handled");
2765 MIB.
addImm(CI->getSExtValue());
2767 MIB.
addFPImm(cast<ConstantFP>(Arg.value()));
2769 }
else if (
auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2770 auto *MD = MDVal->getMetadata();
2771 auto *MDN = dyn_cast<MDNode>(MD);
2773 if (
auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2781 if (VRegs.
size() > 1)
2792 DL->getABITypeAlign(
Info.memVT.getTypeForEVT(
F->getContext())));
2793 LLT MemTy =
Info.memVT.isSimple()
2795 :
LLT::scalar(
Info.memVT.getStoreSizeInBits());
2802 else if (
Info.fallbackAddressSpace)
2810 auto *Token = Bundle->Inputs[0].get();
2811 Register TokenReg = getOrCreateVReg(*Token);
2819bool IRTranslator::findUnwindDestinations(
2839 if (isa<LandingPadInst>(Pad)) {
2841 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2844 if (isa<CleanupPadInst>(Pad)) {
2847 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2848 UnwindDests.
back().first->setIsEHScopeEntry();
2849 UnwindDests.back().first->setIsEHFuncletEntry();
2852 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2854 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2855 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2857 if (IsMSVCCXX || IsCoreCLR)
2858 UnwindDests.back().first->setIsEHFuncletEntry();
2860 UnwindDests.back().first->setIsEHScopeEntry();
2862 NewEHPadBB = CatchSwitch->getUnwindDest();
2868 if (BPI && NewEHPadBB)
2870 EHPadBB = NewEHPadBB;
2875bool IRTranslator::translateInvoke(
const User &U,
2883 const Function *Fn =
I.getCalledFunction();
2890 if (
I.hasDeoptState())
2908 bool LowerInlineAsm =
I.isInlineAsm();
2909 bool NeedEHLabel =
true;
2915 MIRBuilder.
buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2920 if (LowerInlineAsm) {
2921 if (!translateInlineAsm(
I, MIRBuilder))
2923 }
else if (!translateCallBase(
I, MIRBuilder))
2939 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2943 &ReturnMBB = getMBB(*ReturnBB);
2945 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2946 for (
auto &UnwindDest : UnwindDests) {
2947 UnwindDest.first->setIsEHPad();
2948 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2953 assert(BeginSymbol &&
"Expected a begin symbol!");
2954 assert(EndSymbol &&
"Expected an end symbol!");
2955 MF->
addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2958 MIRBuilder.
buildBr(ReturnMBB);
2962bool IRTranslator::translateCallBr(
const User &U,
2968bool IRTranslator::translateLandingPad(
const User &U,
2992 MIRBuilder.
buildInstr(TargetOpcode::EH_LABEL)
2998 if (
auto *RegMask =
TRI.getCustomEHPadPreservedMask(*MF))
3006 for (
Type *Ty : cast<StructType>(LP.
getType())->elements())
3008 assert(Tys.
size() == 2 &&
"Only two-valued landingpads are supported");
3017 MIRBuilder.
buildCopy(ResRegs[0], ExceptionReg);
3025 MIRBuilder.
buildCopy(PtrVReg, SelectorReg);
3026 MIRBuilder.
buildCast(ResRegs[1], PtrVReg);
3031bool IRTranslator::translateAlloca(
const User &U,
3033 auto &AI = cast<AllocaInst>(U);
3039 Register Res = getOrCreateVReg(AI);
3040 int FI = getOrCreateFrameIndex(AI);
3053 if (MRI->
getType(NumElts) != IntPtrTy) {
3063 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy,
DL->getTypeAllocSize(Ty)));
3064 MIRBuilder.
buildMul(AllocSize, NumElts, TySize);
3071 auto AllocAdd = MIRBuilder.
buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3075 auto AlignedAlloc = MIRBuilder.
buildAnd(IntPtrTy, AllocAdd, AlignCst);
3078 if (Alignment <= StackAlign)
3079 Alignment =
Align(1);
3092 MIRBuilder.
buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3093 {getOrCreateVReg(*
U.getOperand(0)),
3094 DL->getABITypeAlign(
U.getType()).value()});
3102 auto &UI = cast<UnreachableInst>(U);
3105 if (
const CallInst *Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());
3106 Call &&
Call->doesNotReturn()) {
3110 if (
Call->isNonContinuableTrap())
3118bool IRTranslator::translateInsertElement(
const User &U,
3122 if (
auto *FVT = dyn_cast<FixedVectorType>(
U.getType());
3123 FVT && FVT->getNumElements() == 1)
3124 return translateCopy(U, *
U.getOperand(1), MIRBuilder);
3127 Register Val = getOrCreateVReg(*
U.getOperand(0));
3128 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3131 if (
auto *CI = dyn_cast<ConstantInt>(
U.getOperand(2))) {
3132 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3133 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3134 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3135 Idx = getOrCreateVReg(*NewIdxCI);
3139 Idx = getOrCreateVReg(*
U.getOperand(2));
3148bool IRTranslator::translateExtractElement(
const User &U,
3152 if (cast<FixedVectorType>(
U.getOperand(0)->getType())->getNumElements() == 1)
3153 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3156 Register Val = getOrCreateVReg(*
U.getOperand(0));
3159 if (
auto *CI = dyn_cast<ConstantInt>(
U.getOperand(1))) {
3160 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3161 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3162 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3163 Idx = getOrCreateVReg(*NewIdxCI);
3167 Idx = getOrCreateVReg(*
U.getOperand(1));
3176bool IRTranslator::translateShuffleVector(
const User &U,
3182 if (
U.getOperand(0)->getType()->isScalableTy()) {
3183 Value *Op0 =
U.getOperand(0);
3186 getOrCreateVReg(*Op0), 0);
3192 if (
auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
3193 Mask = SVI->getShuffleMask();
3195 Mask = cast<ConstantExpr>(U).getShuffleMask();
3198 .
buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3199 {getOrCreateVReg(*
U.getOperand(0)),
3200 getOrCreateVReg(*
U.getOperand(1))})
3201 .addShuffleMask(MaskAlloc);
3206 const PHINode &PI = cast<PHINode>(U);
3209 for (
auto Reg : getOrCreateVRegs(PI)) {
3210 auto MIB = MIRBuilder.
buildInstr(TargetOpcode::G_PHI, {
Reg}, {});
3214 PendingPHIs.emplace_back(&PI, std::move(Insts));
3218bool IRTranslator::translateAtomicCmpXchg(
const User &U,
3224 auto Res = getOrCreateVRegs(
I);
3228 Register Cmp = getOrCreateVReg(*
I.getCompareOperand());
3229 Register NewVal = getOrCreateVReg(*
I.getNewValOperand());
3232 OldValRes, SuccessRes,
Addr, Cmp, NewVal,
3235 getMemOpAlign(
I),
I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3236 I.getSuccessOrdering(),
I.getFailureOrdering()));
3240bool IRTranslator::translateAtomicRMW(
const User &U,
3247 Register Val = getOrCreateVReg(*
I.getValOperand());
3249 unsigned Opcode = 0;
3250 switch (
I.getOperation()) {
3254 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3257 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3260 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3263 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3266 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3269 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3272 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3275 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3278 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3281 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3284 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3287 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3290 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3293 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3296 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3299 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3302 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3307 Opcode, Res,
Addr, Val,
3309 Flags, MRI->
getType(Val), getMemOpAlign(
I),
3310 I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3315bool IRTranslator::translateFence(
const User &U,
3317 const FenceInst &Fence = cast<FenceInst>(U);
3323bool IRTranslator::translateFreeze(
const User &U,
3329 "Freeze with different source and destination type?");
3331 for (
unsigned I = 0;
I < DstRegs.
size(); ++
I) {
3338void IRTranslator::finishPendingPhis() {
3344 for (
auto &Phi : PendingPHIs) {
3359 for (
auto *Pred : getMachinePredBBs({IRPred, PI->
getParent()})) {
3363 for (
unsigned j = 0;
j < ValRegs.
size(); ++
j) {
3373void IRTranslator::translateDbgValueRecord(
Value *V,
bool HasArgList,
3379 "Expected inlined-at fields to agree");
3383 if (!V || HasArgList) {
3390 if (
const auto *CI = dyn_cast<Constant>(V)) {
3395 if (
auto *AI = dyn_cast<AllocaInst>(V);
3400 auto ExprOperands =
Expression->getElements();
3401 auto *ExprDerefRemoved =
3407 if (translateIfEntryValueArgument(
false, V, Variable,
Expression, DL,
3410 for (
Register Reg : getOrCreateVRegs(*V)) {
3420void IRTranslator::translateDbgDeclareRecord(
Value *
Address,
bool HasArgList,
3426 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << *Variable <<
"\n");
3431 "Expected inlined-at fields to agree");
3432 auto AI = dyn_cast<AllocaInst>(
Address);
3437 getOrCreateFrameIndex(*AI), DL);
3441 if (translateIfEntryValueArgument(
true,
Address, Variable,
3454void IRTranslator::translateDbgInfo(
const Instruction &Inst,
3459 assert(DLR->getLabel() &&
"Missing label");
3460 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3462 "Expected inlined-at fields to agree");
3479bool IRTranslator::translate(
const Instruction &Inst) {
3481 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3482 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3488#define HANDLE_INST(NUM, OPCODE, CLASS) \
3489 case Instruction::OPCODE: \
3490 return translate##OPCODE(Inst, *CurBuilder.get());
3491#include "llvm/IR/Instruction.def"
3500 if (
auto CurrInstDL = CurBuilder->getDL())
3501 EntryBuilder->setDebugLoc(
DebugLoc());
3503 if (
auto CI = dyn_cast<ConstantInt>(&
C))
3504 EntryBuilder->buildConstant(Reg, *CI);
3505 else if (
auto CF = dyn_cast<ConstantFP>(&
C))
3506 EntryBuilder->buildFConstant(Reg, *CF);
3507 else if (isa<UndefValue>(
C))
3508 EntryBuilder->buildUndef(Reg);
3509 else if (isa<ConstantPointerNull>(
C))
3510 EntryBuilder->buildConstant(Reg, 0);
3511 else if (
auto GV = dyn_cast<GlobalValue>(&
C))
3512 EntryBuilder->buildGlobalValue(Reg, GV);
3513 else if (
auto CPA = dyn_cast<ConstantPtrAuth>(&
C)) {
3515 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3516 EntryBuilder->buildConstantPtrAuth(Reg, CPA,
Addr, AddrDisc);
3517 }
else if (
auto CAZ = dyn_cast<ConstantAggregateZero>(&
C)) {
3518 if (!isa<FixedVectorType>(CAZ->getType()))
3521 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3523 return translateCopy(
C, *CAZ->getElementValue(0u), *EntryBuilder);
3525 for (
unsigned I = 0;
I < NumElts; ++
I) {
3526 Constant &Elt = *CAZ->getElementValue(
I);
3529 EntryBuilder->buildBuildVector(Reg, Ops);
3530 }
else if (
auto CV = dyn_cast<ConstantDataVector>(&
C)) {
3532 if (CV->getNumElements() == 1)
3533 return translateCopy(
C, *CV->getElementAsConstant(0), *EntryBuilder);
3535 for (
unsigned i = 0; i < CV->getNumElements(); ++i) {
3536 Constant &Elt = *CV->getElementAsConstant(i);
3539 EntryBuilder->buildBuildVector(Reg, Ops);
3540 }
else if (
auto CE = dyn_cast<ConstantExpr>(&
C)) {
3541 switch(
CE->getOpcode()) {
3542#define HANDLE_INST(NUM, OPCODE, CLASS) \
3543 case Instruction::OPCODE: \
3544 return translate##OPCODE(*CE, *EntryBuilder.get());
3545#include "llvm/IR/Instruction.def"
3549 }
else if (
auto CV = dyn_cast<ConstantVector>(&
C)) {
3550 if (CV->getNumOperands() == 1)
3551 return translateCopy(
C, *CV->getOperand(0), *EntryBuilder);
3553 for (
unsigned i = 0; i < CV->getNumOperands(); ++i) {
3554 Ops.
push_back(getOrCreateVReg(*CV->getOperand(i)));
3556 EntryBuilder->buildBuildVector(Reg, Ops);
3557 }
else if (
auto *BA = dyn_cast<BlockAddress>(&
C)) {
3558 EntryBuilder->buildBlockAddress(Reg, BA);
3565bool IRTranslator::finalizeBasicBlock(
const BasicBlock &BB,
3567 for (
auto &BTB : SL->BitTestCases) {
3570 emitBitTestHeader(BTB, BTB.Parent);
3573 for (
unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3574 UnhandledProb -= BTB.Cases[
j].ExtraProb;
3586 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3589 NextMBB = BTB.Cases[
j + 1].TargetBB;
3590 }
else if (j + 1 == ej) {
3592 NextMBB = BTB.Default;
3595 NextMBB = BTB.Cases[
j + 1].ThisBB;
3598 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j],
MBB);
3600 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3604 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3605 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3608 BTB.Cases.pop_back();
3614 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3615 BTB.Default->getBasicBlock()};
3616 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3617 if (!BTB.ContiguousRange) {
3618 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3621 SL->BitTestCases.clear();
3623 for (
auto &JTCase : SL->JTCases) {
3625 if (!JTCase.first.Emitted)
3626 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3628 emitJumpTable(JTCase.second, JTCase.second.MBB);
3630 SL->JTCases.clear();
3632 for (
auto &SwCase : SL->SwitchCases)
3633 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3634 SL->SwitchCases.clear();
3639 bool FunctionBasedInstrumentation =
3641 SPDescriptor.
initialize(&BB, &
MBB, FunctionBasedInstrumentation);
3661 SuccessMBB->
splice(SuccessMBB->
end(), ParentMBB, SplitPoint,
3665 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3670 if (FailureMBB->
empty()) {
3671 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3683 CurBuilder->setInsertPt(*ParentBB, ParentBB->
end());
3693 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3700 ->buildLoad(PtrMemTy, StackSlotPtr,
3706 LLVM_DEBUG(
dbgs() <<
"Stack protector xor'ing with FP not yet implemented");
3724 assert(FnTy->getNumParams() == 1 &&
"Invalid function signature");
3726 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3729 {GuardVal, FnTy->getParamType(0), {
Flags}});
3732 Info.OrigArgs.push_back(GuardArgInfo);
3733 Info.CallConv = GuardCheckFn->getCallingConv();
3736 if (!CLI->
lowerCall(MIRBuilder, Info)) {
3737 LLVM_DEBUG(
dbgs() <<
"Failed to lower call to stack protector check\n");
3749 getStackGuard(Guard, *CurBuilder);
3753 Register GuardPtr = getOrCreateVReg(*IRGuard);
3756 ->buildLoad(PtrMemTy, GuardPtr,
3775 CurBuilder->setInsertPt(*FailureBB, FailureBB->
end());
3785 if (!CLI->
lowerCall(*CurBuilder, Info)) {
3786 LLVM_DEBUG(
dbgs() <<
"Failed to lower call to stack protector fail\n");
3796 if (
TM.getTargetTriple().isPS() ||
TM.getTargetTriple().isWasm()) {
3797 LLVM_DEBUG(
dbgs() <<
"Unhandled trap emission for stack protector fail\n");
3803void IRTranslator::finalizeFunction() {
3806 PendingPHIs.clear();
3808 FrameIndices.clear();
3809 MachinePreds.clear();
3813 EntryBuilder.reset();
3828 const auto *CI = dyn_cast<CallInst>(&
I);
3837 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3840 TPC = &getAnalysis<TargetPassConfig>();
3847 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3849 EntryBuilder->setCSEInfo(CSEInfo);
3850 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3851 CurBuilder->setCSEInfo(CSEInfo);
3853 EntryBuilder = std::make_unique<MachineIRBuilder>();
3854 CurBuilder = std::make_unique<MachineIRBuilder>();
3857 CurBuilder->setMF(*MF);
3858 EntryBuilder->setMF(*MF);
3860 DL = &
F.getDataLayout();
3861 ORE = std::make_unique<OptimizationRemarkEmitter>(&
F);
3863 TM.resetTargetOptions(
F);
3867 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3868 FuncInfo.
BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3871 FuncInfo.
BPI =
nullptr;
3874 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
3876 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
3879 SL = std::make_unique<GISelSwitchLowering>(
this, FuncInfo);
3880 SL->init(*TLI,
TM, *
DL);
3882 assert(PendingPHIs.empty() &&
"stale PHIs");
3889 F.getSubprogram(), &
F.getEntryBlock());
3890 R <<
"unable to translate in big endian mode";
3895 auto FinalizeOnReturn =
make_scope_exit([
this]() { finalizeFunction(); });
3900 EntryBuilder->setMBB(*EntryBB);
3902 DebugLoc DbgLoc =
F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3906 bool IsVarArg =
F.isVarArg();
3907 bool HasMustTailInVarArgFn =
false;
3910 FuncInfo.
MBBMap.resize(
F.getMaxBlockNumber());
3920 if (!HasMustTailInVarArgFn)
3927 EntryBB->addSuccessor(&getMBB(
F.front()));
3931 F.getSubprogram(), &
F.getEntryBlock());
3932 R <<
"unable to lower function: " <<
ore::NV(
"Prototype",
F.getType());
3940 if (
DL->getTypeStoreSize(Arg.
getType()).isZero())
3945 if (Arg.hasSwiftErrorAttr()) {
3946 assert(VRegs.
size() == 1 &&
"Too many vregs for Swift error");
3953 F.getSubprogram(), &
F.getEntryBlock());
3954 R <<
"unable to lower arguments: " <<
ore::NV(
"Prototype",
F.getType());
3961 if (EnableCSE && CSEInfo)
3974 CurBuilder->setMBB(
MBB);
3975 HasTailCall =
false;
3989 translateDbgInfo(Inst, *CurBuilder);
3991 if (translate(Inst))
3996 R <<
"unable to translate instruction: " <<
ore::NV(
"Opcode", &Inst);
3998 if (ORE->allowExtraAnalysis(
"gisel-irtranslator")) {
3999 std::string InstStrStorage;
4003 R <<
": '" << InstStrStorage <<
"'";
4010 if (!finalizeBasicBlock(*BB,
MBB)) {
4012 BB->getTerminator()->getDebugLoc(), BB);
4013 R <<
"unable to translate basic block";
4023 finishPendingPhis();
4030 assert(EntryBB->succ_size() == 1 &&
4031 "Custom BB used for lowering should have only one successor");
4035 "LLVM-IR entry block has a predecessor!?");
4038 NewEntryBB.
splice(NewEntryBB.
begin(), EntryBB, EntryBB->begin(),
4047 EntryBB->removeSuccessor(&NewEntryBB);
4052 "New entry wasn't next in the list of basic block!");
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Provides analysis for continuously CSEing during GISel passes.
This file implements a version of MachineIRBuilder which CSEs insts within a MachineBasicBlock.
This file describes how to lower LLVM calls to machine code calls.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This contains common code to allow clients to notify changes to machine instr.
const HexagonInstrInfo * TII
IRTranslator LLVM IR static false void reportTranslationError(MachineFunction &MF, const TargetPassConfig &TPC, OptimizationRemarkEmitter &ORE, OptimizationRemarkMissed &R)
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB)
Returns true if a BasicBlock BB within a variadic function contains a variadic musttail call.
static unsigned getConvOpcode(Intrinsic::ID ID)
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL)
static unsigned getConstrainedOpcode(Intrinsic::ID ID)
static cl::opt< bool > EnableCSEInIRTranslator("enable-cse-in-irtranslator", cl::desc("Should enable CSE in irtranslator"), cl::Optional, cl::init(false))
static bool isValInBlock(const Value *V, const BasicBlock *BB)
static bool isSwiftError(const Value *V)
This file declares the IRTranslator pass.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This file describes how to lower LLVM inline asm to machine code INLINEASM.
Legalize the Machine IR a function s Machine IR
Implement a low-level type suitable for MachineInstr level instruction selection.
Implement a low-level type suitable for MachineInstr level instruction selection.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallSet class.
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
const Value * getArraySize() const
Get the number of elements allocated.
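These AllocaInst queries are exactly what an alloca-lowering path needs before choosing a stack representation. A hedged sketch of that classification (not the translator's literal code; MFI is assumed to be the function's MachineFrameInfo):

if (AI.isSwiftError()) {
  // swifterror allocas are modeled as virtual registers, not memory.
} else if (AI.isStaticAlloca()) {
  // Constant-sized alloca in the entry block: a fixed frame index suffices.
} else {
  // Variable-sized object: the final offset is only known at runtime.
  int FI = MFI.CreateVariableSizedObject(AI.getAlign(), &AI);
  (void)FI;
}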
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An immutable pass that tracks lazily created AssumptionCache objects.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
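Each of these BinOp values corresponds to a distinct generic atomic opcode once translated. A minimal sketch of that dispatch, assuming Res, Addr, Val, and MMO already exist (only a few cases shown):

unsigned Opc;
switch (RMW.getOperation()) {
case AtomicRMWInst::Min:  Opc = TargetOpcode::G_ATOMICRMW_MIN;  break;
case AtomicRMWInst::Max:  Opc = TargetOpcode::G_ATOMICRMW_MAX;  break;
case AtomicRMWInst::UMin: Opc = TargetOpcode::G_ATOMICRMW_UMIN; break;
case AtomicRMWInst::UMax: Opc = TargetOpcode::G_ATOMICRMW_UMAX; break;
default: return false; // remaining cases elided
}
MIRBuilder.buildAtomicRMW(Opc, Res, Addr, Val, MMO);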
Attribute getFnAttr(Attribute::AttrKind Kind) const
Return the attribute object that exists for the function.
StringRef getValueAsString() const
Return the attribute's value as a string.
LLVM Basic Block Representation.
unsigned getNumber() const
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction & back() const
Legacy analysis pass which computes BlockFrequencyInfo.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Legacy analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
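These pieces combine when wiring machine CFG edges: query BranchProbabilityInfo per out-edge, attach the probability to the successor, then renormalize. A sketch under the assumption that Src is the IR block, SuccIdx its successor index, and SrcMBB/DstMBB the corresponding machine blocks:

if (BPI)
  SrcMBB->addSuccessor(DstMBB, BPI->getEdgeProbability(Src, SuccIdx));
else
  SrcMBB->addSuccessor(DstMBB); // defaults to an unknown probability
SrcMBB->normalizeSuccProbs();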
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
bool isMustTailCall() const
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Top-level function to check the return type based on the target calling convention.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...
virtual bool enableBigEndian() const
For targets which want to use big-endian can enable it with enableBigEndian() hook.
virtual bool supportSwiftError() const
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
virtual bool fallBackToDAGISel(const MachineFunction &MF) const
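The translator itself stays target-independent; argument and return lowering are deferred through these hooks. A condensed, hedged sketch of the hand-off for formal arguments (FuncInfo, EntryBuilder, and the per-argument vreg lookup are assumed to be in scope):

SmallVector<ArrayRef<Register>, 8> VRegArgs;
for (const Argument &Arg : F.args())
  VRegArgs.push_back(getOrCreateVRegs(Arg)); // one vreg set per IR argument
if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo))
  return false; // target could not lower this prototype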
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
bool isFPPredicate() const
bool isIntPredicate() const
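As a small illustration of these predicates feeding generic instructions, a signed-minimum pattern can be built from G_ICMP plus G_SELECT. A hedged sketch; Dst, LHS, and RHS are assumed registers and MRI the function's MachineRegisterInfo:

Register Cond = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_SLT, Cond, LHS, RHS);
MIRBuilder.buildSelect(Dst, Cond, LHS, RHS); // Dst = signed min(LHS, RHS)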
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this label.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
This class represents an Operation in the Expression.
A parsed version of the target data layout string, and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits. FIXME: The defaults need to be removed once all of the backends/clients ...
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
This represents the llvm.dbg.declare instruction.
Value * getAddress() const
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
Value * getValue(unsigned OpIdx=0) const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
BranchProbabilityInfo * BPI
void clear()
clear - Clear out all the function-specific state.
MachineBasicBlock * getMBB(const BasicBlock *BB) const
SmallVector< MachineBasicBlock * > MBBMap
A mapping from LLVM basic block numbers to their machine blocks.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Constant * getPersonalityFn() const
Get the personality function associated with this function.
const Function & getFunction() const
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
The actual analysis pass wrapper.
Simple wrapper that does the following.
Abstract class that contains various methods for clients to notify about changes.
Simple wrapper observer that takes several observers, and calls each one for each event.
void removeObserver(GISelChangeObserver *O)
void addObserver(GISelChangeObserver *O)
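The wrapper observer and make_scope_exit pair naturally: register an observer for the duration of a scope and guarantee its removal on every exit path. A minimal sketch, assuming WrapperObserver is a GISelObserverWrapper and Verifier some GISelChangeObserver:

WrapperObserver.addObserver(&Verifier);
auto RemoveObserver =
    llvm::make_scope_exit([&] { WrapperObserver.removeObserver(&Verifier); });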
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
IRTranslator(CodeGenOptLevel OptLevel=CodeGenOptLevel::None)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Indirect Branch Instruction.
bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB, std::function< ArrayRef< Register >(const Value &Val)> GetOrCreateVRegs) const
Lower the given inline asm call instruction. GetOrCreateVRegs is a callback to materialize a register ...
This instruction inserts a struct field of array element value into an aggregate value.
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
constexpr LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
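These constructors and queries cover the common LLT shapes. A short illustrative snippet (the concrete widths are arbitrary):

LLT S64   = LLT::scalar(64);          // 64-bit "bag of bits"
LLT P0    = LLT::pointer(0, 64);      // 64-bit pointer in address space 0
LLT V4S32 = LLT::fixed_vector(4, 32); // <4 x s32>
assert(V4S32.isFixedVector() && V4S32.getNumElements() == 4);
assert(P0.isPointer() && S64.getSizeInBits() == 64);
LLT V4S16 = V4S32.changeElementType(LLT::scalar(16)); // same count, s16 lanes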
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Value * getPointerOperand()
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
static LocationSize precise(uint64_t Value)
Context object for machine code objects.
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
unsigned pred_size() const
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
void setAddressTakenIRBlock(BasicBlock *BB)
Set this block to reflect that it corresponds to an IR-level basic block with a BlockAddress.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
void sortUniqueLiveIns()
Sorts and uniques the LiveIns vector.
bool isPredecessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB is a predecessor of this block.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
void setHasMustTailInVarArgFunc(bool B)
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
void push_back(MachineBasicBlock *MBB)
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MCSymbol * addLandingPad(MachineBasicBlock *LandingPad)
Add a new landing pad, and extract the exception handling information from the landingpad instruction...
void deleteMachineBasicBlock(MachineBasicBlock *MBB)
DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
void remove(iterator MBBI)
void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr, int Slot, const DILocation *Loc)
Collect information used to emit debugging information of a variable in a stack slot.
const MachineBasicBlock & front() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
Helper class to build MachineInstr.
MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_FREEZE Src.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildResetFPMode()
Build and insert G_RESET_FPMODE.
MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPEXT Op.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
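buildFence takes the ordering and scope as raw unsigned values, so an IR fence maps through almost directly. A hedged sketch, assuming Fence is a FenceInst:

MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                      Fence.getSyncScopeID());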
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, const SrcOp &Src2, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FMA Op0, Op1, Op2.
MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_MUL Op0, Op1.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_SUB Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildSetFPMode(const SrcOp &Src)
Build and insert G_SET_FPMODE Src.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified b...
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res, const SrcOp &Val, const int Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
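The memory-touching builders all require an explicit MachineMemOperand, allocated through the MachineFunction. A sketch of a 32-bit load/store round trip (Ptr is an assumed pointer register; the empty MachinePointerInfo is purely illustrative):

LLT S32 = LLT::scalar(32);
auto *LoadMMO = MF.getMachineMemOperand(
    MachinePointerInfo(), MachineMemOperand::MOLoad, S32, Align(4));
auto Load = MIRBuilder.buildLoad(S32, Ptr, *LoadMMO);
auto *StoreMMO = MF.getMachineMemOperand(
    MachinePointerInfo(), MachineMemOperand::MOStore, S32, Align(4));
MIRBuilder.buildStore(Load.getReg(0), Ptr, *StoreMMO);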
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
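The three debug builders differ only in how the variable's location is expressed. A compact sketch (Reg, Var, Expr, and Label are assumed to be in scope and metadata-verified):

MIRBuilder.buildDirectDbgValue(Reg, Var, Expr);   // the value lives in Reg
MIRBuilder.buildIndirectDbgValue(Reg, Var, Expr); // Reg holds the value's address
MIRBuilder.buildDbgLabel(Label);                  // position of a DILabel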