#include "llvm/IR/IntrinsicsAMDGPU.h"
#define DEBUG_TYPE "irtranslator"
    cl::desc("Should enable CSE in irtranslator"),
  MF.getProperties().setFailedISel();
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();
  if (TPC.isGlobalISelAbortEnabled())
  DILocationVerifier() = default;
  ~DILocationVerifier() override = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");
                      << " was copied to " << MI);
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
                 Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
    return *VMap.getVRegs(Val);
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
         "Don't know how to create an empty vreg");
                 Offsets->empty() ? Offsets : nullptr);
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
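// Returns a cached frame index for an alloca, creating a stack object of at
// least one byte with the alloca's own alignment on first use.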
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto [MapEntry, Inserted] = FrameIndices.try_emplace(&AI);
    return MapEntry->second;
  Size = std::max<uint64_t>(Size, 1u);
  int &FI = MapEntry->second;
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
    return SI->getAlign();
    return LI->getAlign();
  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
  return U.getType()->getScalarType()->isBFloatTy() ||
           return V->getType()->getScalarType()->isBFloatTy();
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));

  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
bool IRTranslator::translateCompare(const User &U,
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    VRegs = getOrCreateVRegs(*Ret);
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
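// Emits a CaseBlock for one leg of a merged condition; the comparison
// predicate is inverted when InvertCond is set.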
void IRTranslator::emitBranchForMergedCondition(
    Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
  SwitchCG::CaseBlock CB(
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
    return I->getParent() == BB;

void IRTranslator::findMergedConditions(
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
  const Value *BOpOp0, *BOpOp1;
    if (BOpc == Instruction::And)
      BOpc = Instruction::Or;
    else if (BOpc == Instruction::Or)
      BOpc = Instruction::And;
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
  MachineBasicBlock *TmpBB =
  if (Opc == Instruction::Or) {
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
    assert(Opc == Instruction::And && "Unknown merge op!");
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
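// Decides whether a pair of pending switch cases is worth emitting as two
// separate branches; cases comparing the same operands are folding candidates.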
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  if (Cases.size() != 2)
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      Cases[0].TrueBB == Cases[1].ThisBB)
      Cases[0].FalseBB == Cases[1].ThisBB)
  auto &CurMBB = MIRBuilder.getMBB();
      !CurMBB.isLayoutSuccessor(Succ0MBB))
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
  using namespace PatternMatch;
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);
      SL->SwitchCases.clear();
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
    Src->addSuccessorWithoutProb(Dst);
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
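// Switch lowering: build one CaseCluster per case, let SwitchLowering find
// jump tables and bit-test clusters, then lower the resulting work list.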
  using namespace SwitchCG;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
                          : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
  if (Clusters.empty()) {
  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
      if (C.Kind == CC_BitTests)
      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        C.High->getValue().print(dbgs(), true);
  assert(!Clusters.empty());
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    if (NumClusters > 3 &&
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
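// Splits a switch work item around a pivot cluster, queueing the left and
// right halves with roughly half of the default probability each.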
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);
  const ConstantInt *Pivot = PivotCluster->Low;
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
    SL->SwitchCases.push_back(CB);
  assert(JT.Reg && "Should lower JT Header first!");
  MachineIRBuilder MIB(*HeaderBB->getParent());
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
    JT.Reg = Sub.getReg(0);
  auto Cst = getOrCreateVReg(
  if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
           "Can only handle SLE ranges");
    const LLT CmpTy = MRI->getType(CmpOpReg);
    auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);
  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
    addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;
  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough;
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
    JTH->Emitted = true;
    bool FallthroughUnreachable,
  using namespace SwitchCG;
  if (I->Low == I->High) {
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
  emitSwitchCase(CB, SwitchMBB, MIB);
  MachineIRBuilder &MIB = *CurBuilder;
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);
  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
  LLT MaskTy = SwitchOpTy;
  for (const SwitchCG::BitTestCase &Case : B.Cases) {
  if (SwitchOpTy != MaskTy)
  MachineBasicBlock *MBB = B.Cases[0].ThisBB;
  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  if (!B.FallthroughUnreachable) {
        RangeSub, RangeCst);
  MachineIRBuilder &MIB = *CurBuilder;
  if (PopCount == 1) {
    auto MaskTrailingZeros =
  } else if (PopCount == BB.Range) {
    auto MaskTrailingOnes =
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
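// Lowers a bit-test cluster: its case blocks are inserted after the current
// block, and the header is emitted immediately when still in the switch MBB.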
bool IRTranslator::lowerBitTestWorkItem(
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;
  BTB->DefaultProb = UnhandledProbs;
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  if (++BBI != FuncInfo.MF->end())
      [](const CaseCluster &a, const CaseCluster &b) {
        return a.Prob != b.Prob
               : a.Low->getValue().slt(b.Low->getValue());
  for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
    if (I->Prob > W.LastCluster->Prob)
    if (I->Kind == CC_Range && I->MBB == NextMBB) {
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;
  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      Fallthrough = DefaultMBB;
      CurMF->insert(BBI, Fallthrough);
    UnhandledProbs -= I->Prob;
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
    CurMBB = Fallthrough;
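// indirectbr: every listed successor becomes a CFG successor, added only once
// even if it appears several times in the instruction.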
bool IRTranslator::translateIndirectBr(const User &U,
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    if (!AddedSuccessors.insert(Succ).second)
    return Arg->hasSwiftErrorAttr();
  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
    assert(Regs.size() == 1 && "swifterror should be single pointer");
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA->pointsToConstantMemory(
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
  for (unsigned i = 0; i < Vals.size(); ++i) {
    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        SI.getSyncScopeID(), SI.getOrdering());
  const Value *Src = U.getOperand(0);
    for (auto Idx : EVI->indices())
    for (auto Idx : IVI->indices())
      DL.getIndexedOffsetInType(Src->getType(), Indices));

bool IRTranslator::translateExtractValue(const User &U,
  const Value *Src = U.getOperand(0);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  auto &DstRegs = allocateVRegs(U);
  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];
bool IRTranslator::translateInsertValue(const User &U,
  const Value *Src = U.getOperand(0);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  auto *InsertedIt = InsertedRegs.begin();
  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
      DstRegs[i] = SrcRegs[i];
bool IRTranslator::translateSelect(const User &U,
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);

bool IRTranslator::translateCopy(const User &U, const Value &V,
  auto &Regs = *VMap.getVRegs(U);
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);

bool IRTranslator::translateBitCast(const User &U,
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
bool IRTranslator::translateCast(unsigned Opcode, const User &U,

bool IRTranslator::translateGetElementPtr(const User &U,
  Value &Op0 = *U.getOperand(0);
  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
  uint32_t PtrAddFlags = 0;
  auto PtrAddFlagsWithConst = [&](int64_t Offset) {
  unsigned VectorWidth = 0;
  bool WantSplatVector = false;
    WantSplatVector = VectorWidth > 1;
  if (WantSplatVector && !PtrTy.isVector()) {
    OffsetIRTy = DL->getIndexType(PtrIRTy);
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;
                      PtrAddFlagsWithConst(Offset))
      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
      if (ElementSize != 1) {
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
        GepOffsetReg = IdxReg;
          MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           PtrAddFlagsWithConst(Offset));
  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
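// Common lowering for memcpy/memmove/memset-style intrinsics: alignment and
// aliasing info is attached as memory operands on the generic instruction.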
bool IRTranslator::translateMemFunc(const CallInst &CI,
  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
  if (MRI->getType(SizeOpReg) != SizeTy)
  ConstantInt *CopySize = nullptr;
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    DstAlign = MSI->getDestAlign().valueOrOne();
  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(
  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
bool IRTranslator::translateTrap(const CallInst &CI,
  StringRef TrapFuncName =
      CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {
  CallLowering::CallLoweringInfo Info;
  if (Opcode == TargetOpcode::G_UBSANTRAP)
  return CLI->lowerCall(MIRBuilder, Info);
bool IRTranslator::translateVectorInterleave2Intrinsic(
         "This function can only be called on the interleave2 intrinsic!");
  Register Res = getOrCreateVReg(CI);
  LLT OpTy = MRI->getType(Op0);

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
         "This function can only be called on the deinterleave2 intrinsic!");
  LLT ResTy = MRI->getType(Res[0]);
void IRTranslator::getStackGuard(Register DstReg,
  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
    Ctx.diagnose(DiagnosticInfoGeneric("unable to lower stackguard"));
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass());
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
  MachinePointerInfo MPInfo(Global);
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
      Op, {ResRegs[0], ResRegs[1]},

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
  Register Dst = getOrCreateVReg(CI);
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
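// Maps "simple" intrinsics, whose operands are passed through unchanged to a
// single generic instruction, onto their generic opcodes.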
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::atan2:
    return TargetOpcode::G_FATAN2;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::minimumnum:
    return TargetOpcode::G_FMINIMUMNUM;
  case Intrinsic::maximumnum:
    return TargetOpcode::G_FMAXIMUMNUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::experimental_vector_compress:
    return TargetOpcode::G_VECTOR_COMPRESS;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
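// If the intrinsic maps onto a simple opcode above, emit it directly with the
// call arguments as operands; anything else needs dedicated handling.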
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
  unsigned Op = getSimpleIntrinsicOpcode(ID);
  for (const auto &Arg : CI.args())
  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
bool IRTranslator::translateConstrainedFPIntrinsic(

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;
  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();
bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");
  MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;

bool IRTranslator::translateConvergenceControlIntrinsic(
  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
  if (ID == Intrinsic::experimental_convergence_loop) {
    assert(Bundle && "Expected a convergence control token.");
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
  if (ORE->enabled()) {
      MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
        MF->getFunction().hasOptNone())
    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;
  case Intrinsic::fake_use: {
    for (const auto &Arg : CI.args())
    MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
    MF->setHasFakeUses(true);
  case Intrinsic::dbg_declare: {
  case Intrinsic::dbg_label: {
           "Expected inlined-at fields to agree");
  case Intrinsic::vaend:
  case Intrinsic::vastart: {
    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                ListSize, Alignment));
  case Intrinsic::dbg_assign:
  case Intrinsic::dbg_value: {
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    Register Dst = getOrCreateVReg(CI);
        TLI->isFMAFasterThanFMulAndFAdd(*MF,
                                        TLI->getValueType(*DL, CI.getType()))) {
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::frexp: {
  case Intrinsic::modf: {
    MIRBuilder.buildModf(VRegs[0], VRegs[1],
  case Intrinsic::sincos: {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    unsigned TypeID = MF->getTypeIDFor(GV);
  case Intrinsic::objectsize:
  case Intrinsic::is_constant:
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
  case Intrinsic::stackprotector: {
    if (TLI->useLoadStackGuardNode(*CI.getModule())) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);
        GuardVal, getOrCreateVReg(*Slot),
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
  case Intrinsic::invariant_start: {
  case Intrinsic::invariant_end:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
  case Intrinsic::write_register: {
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
          MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    Register Dst = getOrCreateVReg(CI);
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      if (!MRI->getType(VecSrc).isVector())
        Opc = ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
                                                  : TargetOpcode::G_FMUL;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    LLT DstTy = MRI->getType(Dst);
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
  case Intrinsic::amdgcn_cs_chain:
  case Intrinsic::amdgcn_call_whole_wave:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    std::optional<RoundingMode> RoundMode =
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
        .addImm((int)*RoundMode);
  case Intrinsic::is_fpclass: {
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
  case Intrinsic::set_fpenv: {
  case Intrinsic::reset_fpenv:
  case Intrinsic::set_fpmode: {
  case Intrinsic::reset_fpmode:
  case Intrinsic::get_rounding:
  case Intrinsic::set_rounding:
  case Intrinsic::vscale: {
  case Intrinsic::scmp:
    MIRBuilder.buildSCmp(getOrCreateVReg(CI),
  case Intrinsic::ucmp:
    MIRBuilder.buildUCmp(getOrCreateVReg(CI),
  case Intrinsic::vector_extract:
    return translateExtractVector(CI, MIRBuilder);
  case Intrinsic::vector_insert:
    return translateInsertVector(CI, MIRBuilder);
  case Intrinsic::stepvector: {
  case Intrinsic::prefetch: {
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
      return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
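// Inline asm is delegated to the target's InlineAsmLowering; targets without
// one simply fail the translation.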
bool IRTranslator::translateInlineAsm(const CallBase &CB,
  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
bool IRTranslator::translateCallBase(const CallBase &CB,
  for (const auto &Arg : CB.args()) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      Args.push_back(getOrCreateVRegs(*Arg));
  if (ORE->enabled()) {
      MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
  std::optional<CallLowering::PtrAuthInfo> PAI;
    const Value *Key = Bundle->Inputs[0];
    if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
        !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
      Register DiscReg = getOrCreateVReg(*Discriminator);
    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return translateInlineAsm(CI, MIRBuilder);
    if (translateCallBase(CI, MIRBuilder)) {
  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    ResultRegs = getOrCreateVRegs(CI);
      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      MIB.addImm(CI->getSExtValue());
      auto *MD = MDVal->getMetadata();
        MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
  if (VRegs.size() > 1)
  TargetLowering::IntrinsicInfo Info;
  if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());
    MachinePointerInfo MPI;
      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
    else if (Info.fallbackAddressSpace)
      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
    auto *Token = Bundle->Inputs[0].get();
    Register TokenReg = getOrCreateVReg(*Token);
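// Walks the chain of EH pads starting at EHPadBB and records every unwind
// destination with its probability, marking funclet and scope entries.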
bool IRTranslator::findUnwindDestinations(
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();
    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;
bool IRTranslator::translateInvoke(const User &U,
  MCContext &Context = MF->getContext();
  const Function *Fn = I.getCalledFunction();
  if (I.hasDeoptState())
      (MF->getTarget().getTargetTriple().isOSWindows() &&
  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;
    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();
  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
  } else if (!translateCallBase(I, MIRBuilder))
    EndSymbol = Context.createTempSymbol();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =
  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.buildBr(ReturnMBB);
bool IRTranslator::translateCallBr(const User &U,

bool IRTranslator::translateLandingPad(const User &U,
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
bool IRTranslator::translateAlloca(const User &U,
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
  if (MF->getTarget().getTargetTriple().isOSWindows())
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
bool IRTranslator::translateUnreachable(const User &U,
  if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
                            MF->getTarget().Options.NoTrapAfterNoreturn))

bool IRTranslator::translateInsertElement(const User &U,
      FVT && FVT->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    Idx = getOrCreateVReg(*U.getOperand(2));
    if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
      const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
bool IRTranslator::translateInsertVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      CI = ConstantInt::get(CI->getContext(), NewIdx);
      ResultType && ResultType->getNumElements() == 1) {
        InputType && InputType->getNumElements() == 1) {
      return translateCopy(U, *U.getOperand(0), MIRBuilder);
    Register Idx = getOrCreateVReg(*CI);
    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
      getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
bool IRTranslator::translateExtractElement(const User &U,
  if (const FixedVectorType *FVT =
    if (FVT->getNumElements() == 1)
      return translateCopy(U, *U.getOperand(0), MIRBuilder);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    Idx = getOrCreateVReg(*U.getOperand(1));
    if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
      const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
bool IRTranslator::translateExtractVector(const User &U,
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
      CI = ConstantInt::get(CI->getContext(), NewIdx);
      ResultType && ResultType->getNumElements() == 1) {
        InputType && InputType->getNumElements() == 1) {
        return translateCopy(U, *U.getOperand(0), MIRBuilder);
    Register Idx = getOrCreateVReg(*CI);
    Register Idx = getOrCreateVReg(*CI);
    auto ScaledIndex = MIRBuilder.buildMul(
        VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
      getOrCreateVReg(*U.getOperand(0)),
bool IRTranslator::translateShuffleVector(const User &U,
  if (U.getOperand(0)->getType()->isScalableTy()) {
    Register Val = getOrCreateVReg(*U.getOperand(0));
        MRI->getType(Val).getElementType(), Val, 0);
    Mask = SVI->getShuffleMask();
    unsigned M = Mask[0];
      if (M == 0 || M == 1)
        return translateCopy(U, *U.getOperand(M), MIRBuilder);
          Dst, getOrCreateVReg(*U.getOperand(0)), M);
    } else if (M < SrcElts * 2) {
          Dst, getOrCreateVReg(*U.getOperand(1)), M - SrcElts);
    for (int M : Mask) {
      if (M == 0 || M == 1) {
        Ops.push_back(getOrCreateVReg(*U.getOperand(M)));
        if (!Undef.isValid()) {
          Undef = MRI->createGenericVirtualRegister(SrcTy);
        Ops.push_back(Undef);
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
  PendingPHIs.emplace_back(&PI, std::move(Insts));
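// cmpxchg is lowered with both the success and failure orderings attached to
// its memory operand; the results are the old value and the success flag.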
bool IRTranslator::translateAtomicCmpXchg(const User &U,
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
  auto Res = getOrCreateVRegs(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
bool IRTranslator::translateAtomicRMW(const User &U,
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());
  unsigned Opcode = 0;
  switch (I.getOperation()) {
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
bool IRTranslator::translateFence(const User &U,

bool IRTranslator::translateFreeze(const User &U,
         "Freeze with different source and destination type?");
  for (unsigned I = 0; I < DstRegs.size(); ++I) {

void IRTranslator::finishPendingPhis() {
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
         "Expected inlined-at fields to agree");
  if (!V || HasArgList) {
    auto *ExprDerefRemoved =
    if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,

void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
         "Expected inlined-at fields to agree");
    MF->setVariableDbgInfo(Variable, Expression,
                           getOrCreateFrameIndex(*AI), DL);
  if (translateIfEntryValueArgument(true, Address, Variable,

void IRTranslator::translateDbgInfo(const Instruction &Inst,
    assert(DLR->getLabel() && "Missing label");
    assert(DLR->getLabel()->isValidLocationForIntrinsic(
           "Expected inlined-at fields to agree");
      translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
      translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
3710bool IRTranslator::translate(
const Instruction &Inst) {
3712 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3713 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3715 if (TLI->fallBackToDAGISel(Inst))
3719#define HANDLE_INST(NUM, OPCODE, CLASS) \
3720 case Instruction::OPCODE: \
3721 return translate##OPCODE(Inst, *CurBuilder.get());
3722#include "llvm/IR/Instruction.def"
3731 if (
auto CurrInstDL = CurBuilder->getDL())
3732 EntryBuilder->setDebugLoc(
DebugLoc());
3738 EntryBuilder->buildConstant(
Reg, *CI);
3742 CF = ConstantFP::get(CF->getContext(), CF->getValue());
3743 EntryBuilder->buildFConstant(
Reg, *CF);
3745 EntryBuilder->buildUndef(
Reg);
3747 EntryBuilder->buildConstant(
Reg, 0);
3749 EntryBuilder->buildGlobalValue(
Reg, GV);
3751 Register Addr = getOrCreateVReg(*CPA->getPointer());
3752 Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
3753 EntryBuilder->buildConstantPtrAuth(
Reg, CPA, Addr, AddrDisc);
3755 Constant &Elt = *CAZ->getElementValue(0u);
3757 EntryBuilder->buildSplatVector(
Reg, getOrCreateVReg(Elt));
3761 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3763 return translateCopy(
C, Elt, *EntryBuilder);
3765 EntryBuilder->buildSplatBuildVector(
Reg, getOrCreateVReg(Elt));
3768 if (CV->getNumElements() == 1)
3769 return translateCopy(
C, *CV->getElementAsConstant(0), *EntryBuilder);
3771 for (
unsigned i = 0; i < CV->getNumElements(); ++i) {
3772 Constant &Elt = *CV->getElementAsConstant(i);
3773 Ops.push_back(getOrCreateVReg(Elt));
3775 EntryBuilder->buildBuildVector(Reg, Ops);
3777 switch (CE->getOpcode()) {
3778#define HANDLE_INST(NUM, OPCODE, CLASS) \
3779 case Instruction::OPCODE: \
3780 return translate##OPCODE(*CE, *EntryBuilder.get());
3781#include "llvm/IR/Instruction.def"
3786 if (CV->getNumOperands() == 1)
3787 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3789 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3790 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3792 EntryBuilder->buildBuildVector(Reg, Ops);
3794 EntryBuilder->buildBlockAddress(Reg, BA);
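// Constants are materialized in the entry block via EntryBuilder: G_CONSTANT/G_FCONSTANT for scalars, G_IMPLICIT_DEF for undef, splats or G_BUILD_VECTOR for vector constants, and constant expressions are re-dispatched through the translate<Opcode>() handlers.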
3801bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3803 for (auto &BTB : SL->BitTestCases) {
3806 emitBitTestHeader(BTB, BTB.Parent);
3808 BranchProbability UnhandledProb = BTB.Prob;
3809 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3810 UnhandledProb -= BTB.Cases[j].ExtraProb;
3812 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3821 MachineBasicBlock *NextMBB;
3822 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3825 NextMBB = BTB.Cases[j + 1].TargetBB;
3826 } else if (j + 1 == ej) {
3828 NextMBB = BTB.Default;
3831 NextMBB = BTB.Cases[j + 1].ThisBB;
3834 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3836 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3840 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3841 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3844 BTB.Cases.pop_back();
3850 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3851 BTB.Default->getBasicBlock()};
3852 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3853 if (!BTB.ContiguousRange) {
3854 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3857 SL->BitTestCases.clear();
3859 for (auto &JTCase : SL->JTCases) {
3861 if (!JTCase.first.Emitted)
3862 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3864 emitJumpTable(JTCase.second, JTCase.second.MBB);
3866 SL->JTCases.clear();
3868 for (auto &SwCase : SL->SwitchCases)
3869 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3870 SL->SwitchCases.clear();
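// Any switch lowering deferred for this block is emitted here: bit-test headers and cases, jump-table headers and dispatch blocks, and the remaining compare-and-branch SwitchCases.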
3874 if (SP.shouldEmitSDCheck(BB)) {
3875 bool FunctionBasedInstrumentation =
3876 TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
3877 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3880 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3883 } else if (SPDescriptor.shouldEmitStackProtector()) {
3884 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3885 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3894 ParentMBB, *MF->getSubtarget().getInstrInfo());
3897 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3901 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3905 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3906 if (FailureMBB->empty()) {
3907 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3912 SPDescriptor.resetPerBBState();
3919 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3923 LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));
3929 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3936 ->buildLoad(PtrMemTy, StackSlotPtr,
3941 if (TLI->useStackGuardXorFP()) {
3942 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3947 if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
3959 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3960 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3961 ISD::ArgFlagsTy Flags;
3962 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3964 CallLowering::ArgInfo GuardArgInfo(
3965 {GuardVal, FnTy->getParamType(0), {Flags}});
3967 CallLowering::CallLoweringInfo Info;
3968 Info.OrigArgs.push_back(GuardArgInfo);
3969 Info.CallConv = GuardCheckFn->getCallingConv();
3972 if (!CLI->lowerCall(MIRBuilder, Info)) {
3973 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3985 getStackGuard(Guard, *CurBuilder);
3988 const Value *IRGuard = TLI->getSDagStackGuard(M);
3989 Register GuardPtr = getOrCreateVReg(*IRGuard);
3992 ->buildLoad(PtrMemTy, GuardPtr,
4011 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
4013 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
4014 const char *Name = TLI->getLibcallName(Libcall);
4016 CallLowering::CallLoweringInfo Info;
4017 Info.CallConv = TLI->getLibcallCallingConv(Libcall);
4021 if (!CLI->lowerCall(*CurBuilder, Info)) {
4022 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
4027 const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
4029 CurBuilder->buildInstr(TargetOpcode::G_TRAP);
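// Stack protector checks: the parent block loads the guard and the protected stack slot and compares them; the failure block calls the STACKPROTECTOR_CHECK_FAIL libcall and, when TrapUnreachable is set and NoTrapAfterNoreturn is not, ends with G_TRAP.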
4034void IRTranslator::finalizeFunction() {
4037 PendingPHIs.clear();
4039 FrameIndices.clear();
4040 MachinePreds.clear();
4044 EntryBuilder.reset();
4047 SPDescriptor.resetPerFunctionState();
4060 return CI && CI->isMustTailCall();
4074 : TPC->isGISelCSEEnabled();
4075 TLI = MF->getSubtarget().getTargetLowering();
4078 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4079 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
4080 EntryBuilder->setCSEInfo(CSEInfo);
4081 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
4082 CurBuilder->setCSEInfo(CSEInfo);
4084 EntryBuilder = std::make_unique<MachineIRBuilder>();
4085 CurBuilder = std::make_unique<MachineIRBuilder>();
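// When CSE is enabled both builders are CSEMIRBuilders sharing one GISelCSEInfo; otherwise plain MachineIRBuilders are used.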
4087 CLI = MF->getSubtarget().getCallLowering();
4088 CurBuilder->setMF(*MF);
4089 EntryBuilder->setMF(*MF);
4090 MRI = &MF->getRegInfo();
4091 DL = &F.getDataLayout();
4092 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
4094 TM.resetTargetOptions(F);
4102 FuncInfo.BPI = nullptr;
4108 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
4110 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
4111 SL->init(*TLI, TM, *DL);
4113 assert(PendingPHIs.empty() && "stale PHIs");
4117 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
4120 F.getSubprogram(), &F.getEntryBlock());
4121 R << "unable to translate in big endian mode";
4127 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
4132 EntryBuilder->setMBB(*EntryBB);
4134 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
4135 SwiftError.setFunction(CurMF);
4136 SwiftError.createEntriesInEntryBlock(DbgLoc);
4138 bool IsVarArg = F.isVarArg();
4139 bool HasMustTailInVarArgFn = false;
4142 FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
4146 MBB = MF->CreateMachineBasicBlock(&BB);
4152 if (!HasMustTailInVarArgFn)
4156 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
4159 EntryBB->addSuccessor(&getMBB(F.front()));
4161 if (CLI->fallBackToDAGISel(*MF)) {
4163 F.getSubprogram(), &F.getEntryBlock());
4164 R << "unable to lower function: "
4165 << ore::NV("Prototype", F.getFunctionType());
4173 if (DL->getTypeStoreSize(Arg.getType()).isZero())
4178 if (Arg.hasSwiftErrorAttr()) {
4179 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
4180 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
4184 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
4186 F.getSubprogram(), &F.getEntryBlock());
4187 R << "unable to lower arguments: "
4188 << ore::NV("Prototype", F.getFunctionType());
4195 if (EnableCSE && CSEInfo)
4200 DILocationVerifier Verifier;
4208 CurBuilder->setMBB(MBB);
4209 HasTailCall = false;
4219 Verifier.setCurrentInst(&Inst);
4223 translateDbgInfo(Inst, *CurBuilder);
4225 if (translate(Inst))
4230 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
4232 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
4233 std::string InstStrStorage;
4237 R << ": '" << InstStrStorage << "'";
4244 if (!finalizeBasicBlock(*BB, MBB)) {
4246 BB->getTerminator()->getDebugLoc(), BB);
4247 R << "unable to translate basic block";
4257 finishPendingPhis();
4259 SwiftError.propagateVRegs();
4264 assert(EntryBB->succ_size() == 1 &&
4265 "Custom BB used for lowering should have only one successor");
4269 "LLVM-IR entry block has a predecessor!?");
4272 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
4281 EntryBB->removeSuccessor(&NewEntryBB);
4282 MF->remove(EntryBB);
4283 MF->deleteMachineBasicBlock(EntryBB);
4285 assert(&MF->front() == &NewEntryBB &&
4286 "New entry wasn't next in the list of basic block!");
4290 SP.copyToMachineFrameInfo(MF->getFrameInfo());