64#include "llvm/IR/IntrinsicsAMDGPU.h"
93#define DEBUG_TYPE "irtranslator"
99 cl::desc(
"Should enable CSE in irtranslator"),
116 MF.getProperties().setFailedISel();
117 bool IsGlobalISelAbortEnabled =
122 if (!R.getLocation().isValid() || IsGlobalISelAbortEnabled)
123 R << (
" (in function: " + MF.getName() +
")").str();
125 if (IsGlobalISelAbortEnabled)
142 DILocationVerifier() =
default;
143 ~DILocationVerifier()
override =
default;
145 const Instruction *getCurrentInst()
const {
return CurrInst; }
146 void setCurrentInst(
const Instruction *Inst) { CurrInst = Inst; }
148 void erasingInstr(MachineInstr &
MI)
override {}
149 void changingInstr(MachineInstr &
MI)
override {}
150 void changedInstr(MachineInstr &
MI)
override {}
152 void createdInstr(MachineInstr &
MI)
override {
153 assert(getCurrentInst() &&
"Inserted instruction without a current MI");
158 <<
" was copied to " <<
MI);
164 (
MI.getParent()->isEntryBlock() && !
MI.getDebugLoc()) ||
165 (
MI.isDebugInstr())) &&
166 "Line info was not transferred to all instructions");
190IRTranslator::ValueToVRegInfo::VRegListT &
191IRTranslator::allocateVRegs(
const Value &Val) {
192 auto VRegsIt = VMap.findVRegs(Val);
193 if (VRegsIt != VMap.vregs_end())
194 return *VRegsIt->second;
195 auto *Regs = VMap.getVRegs(Val);
196 auto *Offsets = VMap.getOffsets(Val);
199 Offsets->empty() ? Offsets :
nullptr);
200 for (
unsigned i = 0; i < SplitTys.
size(); ++i)
206 auto VRegsIt = VMap.findVRegs(Val);
207 if (VRegsIt != VMap.vregs_end())
208 return *VRegsIt->second;
211 return *VMap.getVRegs(Val);
214 auto *VRegs = VMap.getVRegs(Val);
215 auto *Offsets = VMap.getOffsets(Val);
219 "Don't know how to create an empty vreg");
223 Offsets->empty() ? Offsets :
nullptr);
226 for (
auto Ty : SplitTys)
227 VRegs->push_back(
MRI->createGenericVirtualRegister(Ty));
235 while (
auto Elt =
C.getAggregateElement(Idx++)) {
236 auto EltRegs = getOrCreateVRegs(*Elt);
240 assert(SplitTys.size() == 1 &&
"unexpectedly split LLT");
241 VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
244 OptimizationRemarkMissed
R(
"gisel-irtranslator",
"GISelFailure",
245 MF->getFunction().getSubprogram(),
246 &MF->getFunction().getEntryBlock());
247 R <<
"unable to translate constant: " <<
ore::NV(
"Type", Val.
getType());
256int IRTranslator::getOrCreateFrameIndex(
const AllocaInst &AI) {
257 auto [MapEntry,
Inserted] = FrameIndices.try_emplace(&AI);
259 return MapEntry->second;
265 Size = std::max<uint64_t>(
Size, 1u);
267 int &FI = MapEntry->second;
268 FI = MF->getFrameInfo().CreateStackObject(
Size, AI.
getAlign(),
false, &AI);
274 return SI->getAlign();
276 return LI->getAlign();
282 OptimizationRemarkMissed
R(
"gisel-irtranslator",
"", &
I);
283 R <<
"unable to translate memop: " <<
ore::NV(
"Opcode", &
I);
289 MachineBasicBlock *
MBB = FuncInfo.getMBB(&BB);
290 assert(
MBB &&
"BasicBlock was not encountered before");
295 assert(NewPred &&
"new predecessor must be a real MachineBasicBlock");
296 MachinePreds[
Edge].push_back(NewPred);
307 return U.getType()->getScalarType()->isBFloatTy() ||
309 return V->getType()->getScalarType()->isBFloatTy();
313bool IRTranslator::translateBinaryOp(
unsigned Opcode,
const User &U,
322 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
323 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
335bool IRTranslator::translateUnaryOp(
unsigned Opcode,
const User &U,
340 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
352 return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
355bool IRTranslator::translateCompare(
const User &U,
361 Register Op0 = getOrCreateVReg(*
U.getOperand(0));
362 Register Op1 = getOrCreateVReg(*
U.getOperand(1));
367 MIRBuilder.
buildICmp(Pred, Res, Op0, Op1, Flags);
375 MIRBuilder.
buildFCmp(Pred, Res, Op0, Op1, Flags);
383 if (Ret && DL->getTypeStoreSize(Ret->
getType()).isZero())
388 VRegs = getOrCreateVRegs(*Ret);
391 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
392 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
393 &RI, &MIRBuilder.
getMBB(), SwiftError.getFunctionArg());
399 return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
402void IRTranslator::emitBranchForMergedCondition(
411 Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
414 Condition = InvertCond ?
FC->getInversePredicate() :
FC->getPredicate();
417 SwitchCG::CaseBlock CB(Condition,
false, BOp->getOperand(0),
418 BOp->getOperand(1),
nullptr,
TBB, FBB, CurBB,
419 CurBuilder->getDebugLoc(), TProb, FProb);
420 SL->SwitchCases.push_back(CB);
426 SwitchCG::CaseBlock CB(
428 nullptr,
TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
429 SL->SwitchCases.push_back(CB);
434 return I->getParent() == BB;
438void IRTranslator::findMergedConditions(
443 using namespace PatternMatch;
444 assert((
Opc == Instruction::And ||
Opc == Instruction::Or) &&
445 "Expected Opc to be AND/OR");
451 findMergedConditions(NotCond,
TBB, FBB, CurBB, SwitchBB,
Opc, TProb, FProb,
457 const Value *BOpOp0, *BOpOp1;
471 if (BOpc == Instruction::And)
472 BOpc = Instruction::Or;
473 else if (BOpc == Instruction::Or)
474 BOpc = Instruction::And;
480 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
484 emitBranchForMergedCondition(
Cond,
TBB, FBB, CurBB, SwitchBB, TProb, FProb,
491 MachineBasicBlock *TmpBB =
495 if (
Opc == Instruction::Or) {
516 auto NewTrueProb = TProb / 2;
517 auto NewFalseProb = TProb / 2 + FProb;
519 findMergedConditions(BOpOp0,
TBB, TmpBB, CurBB, SwitchBB,
Opc, NewTrueProb,
520 NewFalseProb, InvertCond);
526 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB,
Opc, Probs[0],
527 Probs[1], InvertCond);
529 assert(
Opc == Instruction::And &&
"Unknown merge op!");
549 auto NewTrueProb = TProb + FProb / 2;
550 auto NewFalseProb = FProb / 2;
552 findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB,
Opc, NewTrueProb,
553 NewFalseProb, InvertCond);
559 findMergedConditions(BOpOp1,
TBB, FBB, TmpBB, SwitchBB,
Opc, Probs[0],
560 Probs[1], InvertCond);
564bool IRTranslator::shouldEmitAsBranches(
565 const std::vector<SwitchCG::CaseBlock> &Cases) {
567 if (Cases.size() != 2)
572 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
573 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
574 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
575 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
581 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
582 Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
586 Cases[0].TrueBB == Cases[1].ThisBB)
589 Cases[0].FalseBB == Cases[1].ThisBB)
598 auto &CurMBB = MIRBuilder.
getMBB();
604 !CurMBB.isLayoutSuccessor(Succ0MBB))
608 for (
const BasicBlock *Succ :
successors(&BrInst))
609 CurMBB.addSuccessor(&getMBB(*Succ));
616 MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.
getSuccessor(1));
635 using namespace PatternMatch;
637 if (!TLI->isJumpExpensive() && CondI && CondI->
hasOneUse() &&
638 !BrInst.
hasMetadata(LLVMContext::MD_unpredictable)) {
641 const Value *BOp0, *BOp1;
643 Opcode = Instruction::And;
645 Opcode = Instruction::Or;
649 findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
650 getEdgeProbability(&CurMBB, Succ0MBB),
651 getEdgeProbability(&CurMBB, Succ1MBB),
653 assert(SL->SwitchCases[0].ThisBB == &CurMBB &&
"Unexpected lowering!");
656 if (shouldEmitAsBranches(SL->SwitchCases)) {
658 emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
659 SL->SwitchCases.erase(SL->SwitchCases.begin());
665 for (
unsigned I = 1,
E = SL->SwitchCases.size();
I !=
E; ++
I)
666 MF->erase(SL->SwitchCases[
I].ThisBB);
668 SL->SwitchCases.clear();
675 nullptr, Succ0MBB, Succ1MBB, &CurMBB,
676 CurBuilder->getDebugLoc());
680 emitSwitchCase(CB, &CurMBB, *CurBuilder);
688 Src->addSuccessorWithoutProb(Dst);
692 Prob = getEdgeProbability(Src, Dst);
693 Src->addSuccessor(Dst, Prob);
699 const BasicBlock *SrcBB = Src->getBasicBlock();
700 const BasicBlock *DstBB = Dst->getBasicBlock();
704 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
705 return BranchProbability(1, SuccSize);
707 return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
711 using namespace SwitchCG;
714 BranchProbabilityInfo *BPI = FuncInfo.BPI;
716 Clusters.reserve(
SI.getNumCases());
717 for (
const auto &
I :
SI.cases()) {
718 MachineBasicBlock *Succ = &getMBB(*
I.getCaseSuccessor());
719 assert(Succ &&
"Could not find successor mbb in mapping");
720 const ConstantInt *CaseVal =
I.getCaseValue();
721 BranchProbability Prob =
723 : BranchProbability(1,
SI.getNumCases() + 1);
724 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
727 MachineBasicBlock *DefaultMBB = &getMBB(*
SI.getDefaultDest());
734 MachineBasicBlock *SwitchMBB = &getMBB(*
SI.getParent());
737 if (Clusters.empty()) {
744 SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB,
nullptr,
nullptr);
745 SL->findBitTestClusters(Clusters, &SI);
748 dbgs() <<
"Case clusters: ";
749 for (
const CaseCluster &
C : Clusters) {
750 if (
C.Kind == CC_JumpTable)
752 if (
C.Kind == CC_BitTests)
755 C.Low->getValue().print(
dbgs(),
true);
756 if (
C.Low !=
C.High) {
758 C.High->getValue().print(
dbgs(),
true);
765 assert(!Clusters.empty());
769 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
770 WorkList.push_back({SwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
772 while (!WorkList.empty()) {
773 SwitchWorkListItem
W = WorkList.pop_back_val();
775 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
777 if (NumClusters > 3 &&
780 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB, MIB);
784 if (!lowerSwitchWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
794 using namespace SwitchCG;
795 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
796 "Clusters not sorted?");
797 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
799 auto [LastLeft, FirstRight, LeftProb, RightProb] =
800 SL->computeSplitWorkItemInfo(W);
805 assert(PivotCluster >
W.FirstCluster);
806 assert(PivotCluster <=
W.LastCluster);
811 const ConstantInt *Pivot = PivotCluster->Low;
820 MachineBasicBlock *LeftMBB;
821 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
822 FirstLeft->Low ==
W.GE &&
823 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
824 LeftMBB = FirstLeft->MBB;
826 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
827 FuncInfo.MF->
insert(BBI, LeftMBB);
829 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
835 MachineBasicBlock *RightMBB;
836 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
W.LT &&
837 (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
838 RightMBB = FirstRight->MBB;
840 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
841 FuncInfo.MF->
insert(BBI, RightMBB);
843 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
851 if (
W.MBB == SwitchMBB)
852 emitSwitchCase(CB, SwitchMBB, MIB);
854 SL->SwitchCases.push_back(CB);
860 assert(JT.
Reg &&
"Should lower JT Header first!");
875 MachineIRBuilder MIB(*HeaderBB->
getParent());
882 Register SwitchOpReg = getOrCreateVReg(SValue);
884 auto Sub = MIB.
buildSub({SwitchTy}, SwitchOpReg, FirstCst);
889 const LLT PtrScalarTy =
LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
903 auto Cst = getOrCreateVReg(
944 if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
958 "Can only handle SLE ranges");
969 const LLT CmpTy = MRI->getType(CmpOpReg);
970 auto Sub = MIB.
buildSub({CmpTy}, CmpOpReg, CondLHS);
1005 bool FallthroughUnreachable) {
1006 using namespace SwitchCG;
1007 MachineFunction *CurMF = SwitchMBB->
getParent();
1009 JumpTableHeader *JTH = &SL->JTCases[
I->JTCasesIndex].first;
1010 SwitchCG::JumpTable *JT = &SL->JTCases[
I->JTCasesIndex].second;
1011 BranchProbability DefaultProb =
W.DefaultProb;
1014 MachineBasicBlock *JumpMBB = JT->
MBB;
1015 CurMF->
insert(BBI, JumpMBB);
1025 auto JumpProb =
I->Prob;
1026 auto FallthroughProb = UnhandledProbs;
1034 if (*SI == DefaultMBB) {
1035 JumpProb += DefaultProb / 2;
1036 FallthroughProb -= DefaultProb / 2;
1041 addMachineCFGPred({SwitchMBB->
getBasicBlock(), (*SI)->getBasicBlock()},
1046 if (FallthroughUnreachable)
1047 JTH->FallthroughUnreachable =
true;
1049 if (!JTH->FallthroughUnreachable)
1050 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
1051 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
1056 JTH->HeaderBB = CurMBB;
1060 if (CurMBB == SwitchMBB) {
1061 if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
1063 JTH->Emitted =
true;
1070 bool FallthroughUnreachable,
1075 using namespace SwitchCG;
1078 if (
I->Low ==
I->High) {
1094 CaseBlock CB(Pred, FallthroughUnreachable,
LHS,
RHS, MHS,
I->MBB, Fallthrough,
1097 emitSwitchCase(CB, SwitchMBB, MIB);
1103 MachineIRBuilder &MIB = *CurBuilder;
1107 Register SwitchOpReg = getOrCreateVReg(*
B.SValue);
1109 LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1111 auto RangeSub = MIB.
buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1116 LLT MaskTy = SwitchOpTy;
1122 for (
const SwitchCG::BitTestCase &Case :
B.Cases) {
1132 if (SwitchOpTy != MaskTy)
1138 MachineBasicBlock *
MBB =
B.Cases[0].ThisBB;
1140 if (!
B.FallthroughUnreachable)
1141 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
1142 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
1146 if (!
B.FallthroughUnreachable) {
1150 RangeSub, RangeCst);
1164 MachineIRBuilder &MIB = *CurBuilder;
1170 if (PopCount == 1) {
1173 auto MaskTrailingZeros =
1178 }
else if (PopCount == BB.
Range) {
1180 auto MaskTrailingOnes =
1187 auto SwitchVal = MIB.
buildShl(SwitchTy, CstOne,
Reg);
1191 auto AndOp = MIB.
buildAnd(SwitchTy, SwitchVal, CstMask);
1198 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
1200 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1218bool IRTranslator::lowerBitTestWorkItem(
1224 bool FallthroughUnreachable) {
1225 using namespace SwitchCG;
1226 MachineFunction *CurMF = SwitchMBB->
getParent();
1228 BitTestBlock *BTB = &SL->BitTestCases[
I->BTCasesIndex];
1230 for (BitTestCase &BTC : BTB->Cases)
1231 CurMF->
insert(BBI, BTC.ThisBB);
1234 BTB->Parent = CurMBB;
1235 BTB->Default = Fallthrough;
1237 BTB->DefaultProb = UnhandledProbs;
1241 if (!BTB->ContiguousRange) {
1242 BTB->Prob += DefaultProb / 2;
1243 BTB->DefaultProb -= DefaultProb / 2;
1246 if (FallthroughUnreachable)
1247 BTB->FallthroughUnreachable =
true;
1250 if (CurMBB == SwitchMBB) {
1251 emitBitTestHeader(*BTB, SwitchMBB);
1252 BTB->Emitted =
true;
1262 using namespace SwitchCG;
1263 MachineFunction *CurMF = FuncInfo.MF;
1264 MachineBasicBlock *NextMBB =
nullptr;
1266 if (++BBI != FuncInfo.MF->end())
1275 [](
const CaseCluster &a,
const CaseCluster &b) {
1276 return a.Prob != b.Prob
1278 : a.Low->getValue().slt(b.Low->getValue());
1283 for (CaseClusterIt
I =
W.LastCluster;
I >
W.FirstCluster;) {
1285 if (
I->Prob >
W.LastCluster->Prob)
1287 if (
I->Kind == CC_Range &&
I->MBB == NextMBB) {
1295 BranchProbability DefaultProb =
W.DefaultProb;
1296 BranchProbability UnhandledProbs = DefaultProb;
1297 for (CaseClusterIt
I =
W.FirstCluster;
I <=
W.LastCluster; ++
I)
1298 UnhandledProbs +=
I->Prob;
1300 MachineBasicBlock *CurMBB =
W.MBB;
1301 for (CaseClusterIt
I =
W.FirstCluster,
E =
W.LastCluster;
I <=
E; ++
I) {
1302 bool FallthroughUnreachable =
false;
1303 MachineBasicBlock *Fallthrough;
1304 if (
I ==
W.LastCluster) {
1306 Fallthrough = DefaultMBB;
1311 CurMF->
insert(BBI, Fallthrough);
1313 UnhandledProbs -=
I->Prob;
1317 if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1318 DefaultProb, UnhandledProbs,
I, Fallthrough,
1319 FallthroughUnreachable)) {
1327 if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1328 UnhandledProbs,
I, Fallthrough,
1329 FallthroughUnreachable)) {
1336 if (!lowerSwitchRangeWorkItem(
I,
Cond, Fallthrough,
1337 FallthroughUnreachable, UnhandledProbs,
1338 CurMBB, MIB, SwitchMBB)) {
1345 CurMBB = Fallthrough;
1351bool IRTranslator::translateIndirectBr(
const User &U,
1359 SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1360 MachineBasicBlock &CurBB = MIRBuilder.
getMBB();
1361 for (
const BasicBlock *Succ :
successors(&BrInst)) {
1365 if (!AddedSuccessors.
insert(Succ).second)
1375 return Arg->hasSwiftErrorAttr();
1383 TypeSize StoreSize = DL->getTypeStoreSize(LI.
getType());
1388 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(LI);
1393 Type *OffsetIRTy = DL->getIndexType(Ptr->
getType());
1397 assert(Regs.
size() == 1 &&
"swifterror should be single pointer");
1399 SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.
getMBB(), Ptr);
1405 TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
1407 if (AA->pointsToConstantMemory(
1415 for (
unsigned i = 0; i < Regs.
size(); ++i) {
1420 Align BaseAlign = getMemOpAlign(LI);
1422 MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Regs[i]),
1425 MIRBuilder.
buildLoad(Regs[i], Addr, *MMO);
1433 if (DL->getTypeStoreSize(
SI.getValueOperand()->getType()).isZero())
1437 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(*
SI.getValueOperand());
1440 Type *OffsetIRTy = DL->getIndexType(
SI.getPointerOperandType());
1443 if (CLI->supportSwiftError() &&
isSwiftError(
SI.getPointerOperand())) {
1444 assert(Vals.
size() == 1 &&
"swifterror should be single pointer");
1446 Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.
getMBB(),
1447 SI.getPointerOperand());
1454 for (
unsigned i = 0; i < Vals.
size(); ++i) {
1458 MachinePointerInfo Ptr(
SI.getPointerOperand(), Offsets[i]);
1459 Align BaseAlign = getMemOpAlign(SI);
1460 auto MMO = MF->getMachineMemOperand(Ptr, Flags, MRI->getType(Vals[i]),
1462 SI.getAAMetadata(),
nullptr,
1463 SI.getSyncScopeID(),
SI.getOrdering());
1470 const Value *Src = U.getOperand(0);
1479 for (
auto Idx : EVI->indices())
1482 for (
auto Idx : IVI->indices())
1489 DL.getIndexedOffsetInType(Src->getType(), Indices));
1492bool IRTranslator::translateExtractValue(
const User &U,
1494 const Value *Src =
U.getOperand(0);
1497 ArrayRef<uint64_t>
Offsets = *VMap.getOffsets(*Src);
1499 auto &DstRegs = allocateVRegs(U);
1501 for (
unsigned i = 0; i < DstRegs.size(); ++i)
1502 DstRegs[i] = SrcRegs[Idx++];
1507bool IRTranslator::translateInsertValue(
const User &U,
1509 const Value *Src =
U.getOperand(0);
1511 auto &DstRegs = allocateVRegs(U);
1512 ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1515 auto *InsertedIt = InsertedRegs.
begin();
1517 for (
unsigned i = 0; i < DstRegs.size(); ++i) {
1518 if (DstOffsets[i] >=
Offset && InsertedIt != InsertedRegs.
end())
1519 DstRegs[i] = *InsertedIt++;
1521 DstRegs[i] = SrcRegs[i];
1527bool IRTranslator::translateSelect(
const User &U,
1529 Register Tst = getOrCreateVReg(*
U.getOperand(0));
1538 for (
unsigned i = 0; i < ResRegs.
size(); ++i) {
1539 MIRBuilder.
buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1545bool IRTranslator::translateCopy(
const User &U,
const Value &V,
1548 auto &Regs = *VMap.getVRegs(U);
1550 Regs.push_back(Src);
1551 VMap.getOffsets(U)->push_back(0);
1560bool IRTranslator::translateBitCast(
const User &U,
1568 return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
1570 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
1573 return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1576bool IRTranslator::translateCast(
unsigned Opcode,
const User &U,
1591bool IRTranslator::translateGetElementPtr(
const User &U,
1593 Value &Op0 = *
U.getOperand(0);
1597 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1600 uint32_t PtrAddFlags = 0;
1606 auto PtrAddFlagsWithConst = [&](int64_t
Offset) {
1616 unsigned VectorWidth = 0;
1620 bool WantSplatVector =
false;
1624 WantSplatVector = VectorWidth > 1;
1629 if (WantSplatVector && !PtrTy.
isVector()) {
1636 OffsetIRTy = DL->getIndexType(PtrIRTy);
1643 const Value *Idx = GTI.getOperand();
1644 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1646 Offset += DL->getStructLayout(StTy)->getElementOffset(
Field);
1649 uint64_t ElementSize = GTI.getSequentialElementStride(*DL);
1654 if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
1655 Offset += ElementSize * *Val;
1664 PtrAddFlagsWithConst(
Offset))
1669 Register IdxReg = getOrCreateVReg(*Idx);
1670 LLT IdxTy = MRI->getType(IdxReg);
1671 if (IdxTy != OffsetTy) {
1672 if (!IdxTy.
isVector() && WantSplatVector) {
1685 if (ElementSize != 1) {
1696 MIRBuilder.
buildMul(OffsetTy, IdxReg, ElementSizeMIB, ScaleFlags)
1699 GepOffsetReg = IdxReg;
1703 MIRBuilder.
buildPtrAdd(PtrTy, BaseReg, GepOffsetReg, PtrAddFlags)
1712 MIRBuilder.
buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1713 PtrAddFlagsWithConst(
Offset));
1717 MIRBuilder.
buildCopy(getOrCreateVReg(U), BaseReg);
1721bool IRTranslator::translateMemFunc(
const CallInst &CI,
1731 unsigned MinPtrSize = UINT_MAX;
1732 for (
auto AI = CI.
arg_begin(), AE = CI.
arg_end(); std::next(AI) != AE; ++AI) {
1733 Register SrcReg = getOrCreateVReg(**AI);
1734 LLT SrcTy = MRI->getType(SrcReg);
1736 MinPtrSize = std::min<unsigned>(SrcTy.
getSizeInBits(), MinPtrSize);
1744 if (MRI->getType(SizeOpReg) != SizeTy)
1756 ConstantInt *CopySize =
nullptr;
1759 DstAlign = MCI->getDestAlign().valueOrOne();
1760 SrcAlign = MCI->getSourceAlign().valueOrOne();
1763 DstAlign = MMI->getDestAlign().valueOrOne();
1764 SrcAlign = MMI->getSourceAlign().valueOrOne();
1768 DstAlign = MSI->getDestAlign().valueOrOne();
1771 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1787 if (AA && CopySize &&
1788 AA->pointsToConstantMemory(MemoryLocation(
1798 ICall.addMemOperand(
1799 MF->getMachineMemOperand(MachinePointerInfo(CI.
getArgOperand(0)),
1800 StoreFlags, 1, DstAlign, AAInfo));
1801 if (Opcode != TargetOpcode::G_MEMSET)
1802 ICall.addMemOperand(MF->getMachineMemOperand(
1803 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1808bool IRTranslator::translateTrap(
const CallInst &CI,
1811 StringRef TrapFuncName =
1812 CI.
getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
1813 if (TrapFuncName.
empty()) {
1814 if (Opcode == TargetOpcode::G_UBSANTRAP) {
1823 CallLowering::CallLoweringInfo
Info;
1824 if (Opcode == TargetOpcode::G_UBSANTRAP)
1831 return CLI->lowerCall(MIRBuilder, Info);
1834bool IRTranslator::translateVectorInterleave2Intrinsic(
1837 "This function can only be called on the interleave2 intrinsic!");
1841 Register Res = getOrCreateVReg(CI);
1843 LLT OpTy = MRI->getType(Op0);
1850bool IRTranslator::translateVectorDeinterleave2Intrinsic(
1853 "This function can only be called on the deinterleave2 intrinsic!");
1860 LLT ResTy = MRI->getType(Res[0]);
1869void IRTranslator::getStackGuard(
Register DstReg,
1872 TLI->getSDagStackGuard(*MF->getFunction().getParent(), *Libcalls);
1875 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
1880 const TargetRegisterInfo *
TRI = MF->getSubtarget().getRegisterInfo();
1881 MRI->setRegClass(DstReg,
TRI->getPointerRegClass());
1883 MIRBuilder.
buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1885 unsigned AddrSpace =
Global->getType()->getPointerAddressSpace();
1886 LLT PtrTy =
LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1888 MachinePointerInfo MPInfo(
Global);
1891 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1892 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1893 MIB.setMemRefs({MemRef});
1896bool IRTranslator::translateOverflowIntrinsic(
const CallInst &CI,
unsigned Op,
1900 Op, {ResRegs[0], ResRegs[1]},
1906bool IRTranslator::translateFixedPointIntrinsic(
unsigned Op,
const CallInst &CI,
1908 Register Dst = getOrCreateVReg(CI);
1912 MIRBuilder.
buildInstr(
Op, {Dst}, { Src0, Src1, Scale });
1920 case Intrinsic::acos:
1921 return TargetOpcode::G_FACOS;
1922 case Intrinsic::asin:
1923 return TargetOpcode::G_FASIN;
1924 case Intrinsic::atan:
1925 return TargetOpcode::G_FATAN;
1926 case Intrinsic::atan2:
1927 return TargetOpcode::G_FATAN2;
1928 case Intrinsic::bswap:
1929 return TargetOpcode::G_BSWAP;
1930 case Intrinsic::bitreverse:
1931 return TargetOpcode::G_BITREVERSE;
1932 case Intrinsic::fshl:
1933 return TargetOpcode::G_FSHL;
1934 case Intrinsic::fshr:
1935 return TargetOpcode::G_FSHR;
1936 case Intrinsic::ceil:
1937 return TargetOpcode::G_FCEIL;
1938 case Intrinsic::cos:
1939 return TargetOpcode::G_FCOS;
1940 case Intrinsic::cosh:
1941 return TargetOpcode::G_FCOSH;
1942 case Intrinsic::ctpop:
1943 return TargetOpcode::G_CTPOP;
1944 case Intrinsic::exp:
1945 return TargetOpcode::G_FEXP;
1946 case Intrinsic::exp2:
1947 return TargetOpcode::G_FEXP2;
1948 case Intrinsic::exp10:
1949 return TargetOpcode::G_FEXP10;
1950 case Intrinsic::fabs:
1951 return TargetOpcode::G_FABS;
1952 case Intrinsic::copysign:
1953 return TargetOpcode::G_FCOPYSIGN;
1954 case Intrinsic::minnum:
1955 return TargetOpcode::G_FMINNUM;
1956 case Intrinsic::maxnum:
1957 return TargetOpcode::G_FMAXNUM;
1958 case Intrinsic::minimum:
1959 return TargetOpcode::G_FMINIMUM;
1960 case Intrinsic::maximum:
1961 return TargetOpcode::G_FMAXIMUM;
1962 case Intrinsic::minimumnum:
1963 return TargetOpcode::G_FMINIMUMNUM;
1964 case Intrinsic::maximumnum:
1965 return TargetOpcode::G_FMAXIMUMNUM;
1966 case Intrinsic::canonicalize:
1967 return TargetOpcode::G_FCANONICALIZE;
1968 case Intrinsic::floor:
1969 return TargetOpcode::G_FFLOOR;
1970 case Intrinsic::fma:
1971 return TargetOpcode::G_FMA;
1972 case Intrinsic::log:
1973 return TargetOpcode::G_FLOG;
1974 case Intrinsic::log2:
1975 return TargetOpcode::G_FLOG2;
1976 case Intrinsic::log10:
1977 return TargetOpcode::G_FLOG10;
1978 case Intrinsic::ldexp:
1979 return TargetOpcode::G_FLDEXP;
1980 case Intrinsic::nearbyint:
1981 return TargetOpcode::G_FNEARBYINT;
1982 case Intrinsic::pow:
1983 return TargetOpcode::G_FPOW;
1984 case Intrinsic::powi:
1985 return TargetOpcode::G_FPOWI;
1986 case Intrinsic::rint:
1987 return TargetOpcode::G_FRINT;
1988 case Intrinsic::round:
1989 return TargetOpcode::G_INTRINSIC_ROUND;
1990 case Intrinsic::roundeven:
1991 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1992 case Intrinsic::sin:
1993 return TargetOpcode::G_FSIN;
1994 case Intrinsic::sinh:
1995 return TargetOpcode::G_FSINH;
1996 case Intrinsic::sqrt:
1997 return TargetOpcode::G_FSQRT;
1998 case Intrinsic::tan:
1999 return TargetOpcode::G_FTAN;
2000 case Intrinsic::tanh:
2001 return TargetOpcode::G_FTANH;
2002 case Intrinsic::trunc:
2003 return TargetOpcode::G_INTRINSIC_TRUNC;
2004 case Intrinsic::readcyclecounter:
2005 return TargetOpcode::G_READCYCLECOUNTER;
2006 case Intrinsic::readsteadycounter:
2007 return TargetOpcode::G_READSTEADYCOUNTER;
2008 case Intrinsic::ptrmask:
2009 return TargetOpcode::G_PTRMASK;
2010 case Intrinsic::lrint:
2011 return TargetOpcode::G_INTRINSIC_LRINT;
2012 case Intrinsic::llrint:
2013 return TargetOpcode::G_INTRINSIC_LLRINT;
2015 case Intrinsic::vector_reduce_fmin:
2016 return TargetOpcode::G_VECREDUCE_FMIN;
2017 case Intrinsic::vector_reduce_fmax:
2018 return TargetOpcode::G_VECREDUCE_FMAX;
2019 case Intrinsic::vector_reduce_fminimum:
2020 return TargetOpcode::G_VECREDUCE_FMINIMUM;
2021 case Intrinsic::vector_reduce_fmaximum:
2022 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
2023 case Intrinsic::vector_reduce_add:
2024 return TargetOpcode::G_VECREDUCE_ADD;
2025 case Intrinsic::vector_reduce_mul:
2026 return TargetOpcode::G_VECREDUCE_MUL;
2027 case Intrinsic::vector_reduce_and:
2028 return TargetOpcode::G_VECREDUCE_AND;
2029 case Intrinsic::vector_reduce_or:
2030 return TargetOpcode::G_VECREDUCE_OR;
2031 case Intrinsic::vector_reduce_xor:
2032 return TargetOpcode::G_VECREDUCE_XOR;
2033 case Intrinsic::vector_reduce_smax:
2034 return TargetOpcode::G_VECREDUCE_SMAX;
2035 case Intrinsic::vector_reduce_smin:
2036 return TargetOpcode::G_VECREDUCE_SMIN;
2037 case Intrinsic::vector_reduce_umax:
2038 return TargetOpcode::G_VECREDUCE_UMAX;
2039 case Intrinsic::vector_reduce_umin:
2040 return TargetOpcode::G_VECREDUCE_UMIN;
2041 case Intrinsic::experimental_vector_compress:
2042 return TargetOpcode::G_VECTOR_COMPRESS;
2043 case Intrinsic::lround:
2044 return TargetOpcode::G_LROUND;
2045 case Intrinsic::llround:
2046 return TargetOpcode::G_LLROUND;
2047 case Intrinsic::get_fpenv:
2048 return TargetOpcode::G_GET_FPENV;
2049 case Intrinsic::get_fpmode:
2050 return TargetOpcode::G_GET_FPMODE;
2055bool IRTranslator::translateSimpleIntrinsic(
const CallInst &CI,
2059 unsigned Op = getSimpleIntrinsicOpcode(
ID);
2067 for (
const auto &Arg : CI.
args())
2070 MIRBuilder.
buildInstr(
Op, {getOrCreateVReg(CI)}, VRegs,
2078 case Intrinsic::experimental_constrained_fadd:
2079 return TargetOpcode::G_STRICT_FADD;
2080 case Intrinsic::experimental_constrained_fsub:
2081 return TargetOpcode::G_STRICT_FSUB;
2082 case Intrinsic::experimental_constrained_fmul:
2083 return TargetOpcode::G_STRICT_FMUL;
2084 case Intrinsic::experimental_constrained_fdiv:
2085 return TargetOpcode::G_STRICT_FDIV;
2086 case Intrinsic::experimental_constrained_frem:
2087 return TargetOpcode::G_STRICT_FREM;
2088 case Intrinsic::experimental_constrained_fma:
2089 return TargetOpcode::G_STRICT_FMA;
2090 case Intrinsic::experimental_constrained_sqrt:
2091 return TargetOpcode::G_STRICT_FSQRT;
2092 case Intrinsic::experimental_constrained_ldexp:
2093 return TargetOpcode::G_STRICT_FLDEXP;
2099bool IRTranslator::translateConstrainedFPIntrinsic(
2119std::optional<MCRegister> IRTranslator::getArgPhysReg(
Argument &Arg) {
2120 auto VRegs = getOrCreateVRegs(Arg);
2121 if (VRegs.
size() != 1)
2122 return std::nullopt;
2125 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
2126 if (!VRegDef || !VRegDef->isCopy())
2127 return std::nullopt;
2128 return VRegDef->getOperand(1).getReg().asMCReg();
2131bool IRTranslator::translateIfEntryValueArgument(
bool isDeclare,
Value *Val,
2143 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
2145 LLVM_DEBUG(
dbgs() <<
"Dropping dbg." << (isDeclare ?
"declare" :
"value")
2146 <<
": expression is entry_value but "
2147 <<
"couldn't find a physical register\n");
2155 MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
2167 case Intrinsic::experimental_convergence_anchor:
2168 return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2169 case Intrinsic::experimental_convergence_entry:
2170 return TargetOpcode::CONVERGENCECTRL_ENTRY;
2171 case Intrinsic::experimental_convergence_loop:
2172 return TargetOpcode::CONVERGENCECTRL_LOOP;
2176bool IRTranslator::translateConvergenceControlIntrinsic(
2179 Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2182 if (
ID == Intrinsic::experimental_convergence_loop) {
2184 assert(Bundle &&
"Expected a convergence control token.");
2186 getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2196 if (ORE->enabled()) {
2198 MemoryOpRemark
R(*ORE,
"gisel-irtranslator-memsize", *DL, *LibInfo);
2206 if (translateSimpleIntrinsic(CI,
ID, MIRBuilder))
2212 case Intrinsic::lifetime_start:
2213 case Intrinsic::lifetime_end: {
2216 MF->getFunction().hasOptNone())
2219 unsigned Op =
ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
2220 : TargetOpcode::LIFETIME_END;
2229 case Intrinsic::fake_use: {
2231 for (
const auto &Arg : CI.
args())
2233 MIRBuilder.
buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
2234 MF->setHasFakeUses(
true);
2237 case Intrinsic::dbg_declare: {
2244 case Intrinsic::dbg_label: {
2250 "Expected inlined-at fields to agree");
2255 case Intrinsic::vaend:
2259 case Intrinsic::vastart: {
2261 unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
2264 MIRBuilder.
buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2265 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2267 ListSize, Alignment));
2270 case Intrinsic::dbg_assign:
2277 case Intrinsic::dbg_value: {
2284 case Intrinsic::uadd_with_overflow:
2285 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2286 case Intrinsic::sadd_with_overflow:
2287 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2288 case Intrinsic::usub_with_overflow:
2289 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2290 case Intrinsic::ssub_with_overflow:
2291 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2292 case Intrinsic::umul_with_overflow:
2293 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2294 case Intrinsic::smul_with_overflow:
2295 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2296 case Intrinsic::uadd_sat:
2297 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2298 case Intrinsic::sadd_sat:
2299 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2300 case Intrinsic::usub_sat:
2301 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2302 case Intrinsic::ssub_sat:
2303 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2304 case Intrinsic::ushl_sat:
2305 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2306 case Intrinsic::sshl_sat:
2307 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2308 case Intrinsic::umin:
2309 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2310 case Intrinsic::umax:
2311 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2312 case Intrinsic::smin:
2313 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2314 case Intrinsic::smax:
2315 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2316 case Intrinsic::abs:
2318 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2319 case Intrinsic::smul_fix:
2320 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2321 case Intrinsic::umul_fix:
2322 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2323 case Intrinsic::smul_fix_sat:
2324 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2325 case Intrinsic::umul_fix_sat:
2326 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2327 case Intrinsic::sdiv_fix:
2328 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2329 case Intrinsic::udiv_fix:
2330 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2331 case Intrinsic::sdiv_fix_sat:
2332 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2333 case Intrinsic::udiv_fix_sat:
2334 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2335 case Intrinsic::fmuladd: {
2336 const TargetMachine &TM = MF->getTarget();
2337 Register Dst = getOrCreateVReg(CI);
2342 TLI->isFMAFasterThanFMulAndFAdd(*MF,
2343 TLI->getValueType(*DL, CI.
getType()))) {
2346 MIRBuilder.
buildFMA(Dst, Op0, Op1, Op2,
2357 case Intrinsic::frexp: {
2364 case Intrinsic::modf: {
2366 MIRBuilder.
buildModf(VRegs[0], VRegs[1],
2371 case Intrinsic::sincos: {
2378 case Intrinsic::fptosi_sat:
2382 case Intrinsic::fptoui_sat:
2386 case Intrinsic::memcpy_inline:
2387 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2388 case Intrinsic::memcpy:
2389 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2390 case Intrinsic::memmove:
2391 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2392 case Intrinsic::memset:
2393 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2394 case Intrinsic::eh_typeid_for: {
2397 unsigned TypeID = MF->getTypeIDFor(GV);
2401 case Intrinsic::objectsize:
2404 case Intrinsic::is_constant:
2407 case Intrinsic::stackguard:
2408 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2410 case Intrinsic::stackprotector: {
2413 if (TLI->useLoadStackGuardNode(*CI.
getModule())) {
2414 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2415 getStackGuard(GuardVal, MIRBuilder);
2420 int FI = getOrCreateFrameIndex(*Slot);
2421 MF->getFrameInfo().setStackProtectorIndex(FI);
2424 GuardVal, getOrCreateVReg(*Slot),
2431 case Intrinsic::stacksave: {
2432 MIRBuilder.
buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2435 case Intrinsic::stackrestore: {
2436 MIRBuilder.
buildInstr(TargetOpcode::G_STACKRESTORE, {},
2440 case Intrinsic::cttz:
2441 case Intrinsic::ctlz: {
2443 bool isTrailing =
ID == Intrinsic::cttz;
2444 unsigned Opcode = isTrailing
2445 ? Cst->
isZero() ? TargetOpcode::G_CTTZ
2446 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2447 : Cst->
isZero() ? TargetOpcode::G_CTLZ
2448 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2449 MIRBuilder.
buildInstr(Opcode, {getOrCreateVReg(CI)},
2453 case Intrinsic::invariant_start: {
2457 case Intrinsic::invariant_end:
2459 case Intrinsic::expect:
2460 case Intrinsic::expect_with_probability:
2461 case Intrinsic::annotation:
2462 case Intrinsic::ptr_annotation:
2463 case Intrinsic::launder_invariant_group:
2464 case Intrinsic::strip_invariant_group: {
2466 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2470 case Intrinsic::assume:
2471 case Intrinsic::experimental_noalias_scope_decl:
2472 case Intrinsic::var_annotation:
2473 case Intrinsic::sideeffect:
2476 case Intrinsic::read_volatile_register:
2477 case Intrinsic::read_register: {
2480 .
buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2484 case Intrinsic::write_register: {
2486 MIRBuilder.
buildInstr(TargetOpcode::G_WRITE_REGISTER)
2491 case Intrinsic::localescape: {
2492 MachineBasicBlock &EntryMBB = MF->front();
2497 for (
unsigned Idx = 0,
E = CI.
arg_size(); Idx <
E; ++Idx) {
2504 MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);
2517 case Intrinsic::vector_reduce_fadd:
2518 case Intrinsic::vector_reduce_fmul: {
2521 Register Dst = getOrCreateVReg(CI);
2527 Opc =
ID == Intrinsic::vector_reduce_fadd
2528 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2529 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2530 if (!MRI->getType(VecSrc).isVector())
2531 Opc =
ID == Intrinsic::vector_reduce_fadd ? TargetOpcode::G_FADD
2532 : TargetOpcode::G_FMUL;
2540 if (
ID == Intrinsic::vector_reduce_fadd) {
2541 Opc = TargetOpcode::G_VECREDUCE_FADD;
2542 ScalarOpc = TargetOpcode::G_FADD;
2544 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2545 ScalarOpc = TargetOpcode::G_FMUL;
2547 LLT DstTy = MRI->getType(Dst);
2550 MIRBuilder.
buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2555 case Intrinsic::trap:
2556 return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
2557 case Intrinsic::debugtrap:
2558 return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
2559 case Intrinsic::ubsantrap:
2560 return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
2561 case Intrinsic::allow_runtime_check:
2562 case Intrinsic::allow_ubsan_check:
2563 MIRBuilder.
buildCopy(getOrCreateVReg(CI),
2566 case Intrinsic::amdgcn_cs_chain:
2567 case Intrinsic::amdgcn_call_whole_wave:
2568 return translateCallBase(CI, MIRBuilder);
2569 case Intrinsic::fptrunc_round: {
2574 std::optional<RoundingMode> RoundMode =
2579 .
buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2580 {getOrCreateVReg(CI)},
2582 .addImm((
int)*RoundMode);
2586 case Intrinsic::is_fpclass: {
2591 .
buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2592 {getOrCreateVReg(*FpValue)})
2597 case Intrinsic::set_fpenv: {
2602 case Intrinsic::reset_fpenv:
2605 case Intrinsic::set_fpmode: {
2610 case Intrinsic::reset_fpmode:
2613 case Intrinsic::get_rounding:
2616 case Intrinsic::set_rounding:
2619 case Intrinsic::vscale: {
2623 case Intrinsic::scmp:
2624 MIRBuilder.
buildSCmp(getOrCreateVReg(CI),
2628 case Intrinsic::ucmp:
2629 MIRBuilder.
buildUCmp(getOrCreateVReg(CI),
2633 case Intrinsic::vector_extract:
2634 return translateExtractVector(CI, MIRBuilder);
2635 case Intrinsic::vector_insert:
2636 return translateInsertVector(CI, MIRBuilder);
2637 case Intrinsic::stepvector: {
2641 case Intrinsic::prefetch: {
2648 auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
2651 MIRBuilder.
buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
2657 case Intrinsic::vector_interleave2:
2658 case Intrinsic::vector_deinterleave2: {
2666 return translateVectorInterleave2Intrinsic(CI, MIRBuilder);
2668 return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
2671#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2672 case Intrinsic::INTRINSIC:
2673#include "llvm/IR/ConstrainedOps.def"
2676 case Intrinsic::experimental_convergence_anchor:
2677 case Intrinsic::experimental_convergence_entry:
2678 case Intrinsic::experimental_convergence_loop:
2679 return translateConvergenceControlIntrinsic(CI,
ID, MIRBuilder);
2680 case Intrinsic::reloc_none: {
2683 MIRBuilder.
buildInstr(TargetOpcode::RELOC_NONE)
2691bool IRTranslator::translateInlineAsm(
const CallBase &CB,
2696 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2700 dbgs() <<
"Inline asm lowering is not supported for this target yet\n");
2705 MIRBuilder, CB, [&](
const Value &Val) {
return getOrCreateVRegs(Val); });
2708bool IRTranslator::translateCallBase(
const CallBase &CB,
2715 for (
const auto &Arg : CB.
args()) {
2717 assert(SwiftInVReg == 0 &&
"Expected only one swift error argument");
2719 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2720 MIRBuilder.
buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2721 &CB, &MIRBuilder.
getMBB(), Arg));
2724 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.
getMBB(), Arg);
2727 Args.push_back(getOrCreateVRegs(*Arg));
2731 if (ORE->enabled()) {
2733 MemoryOpRemark
R(*ORE,
"gisel-irtranslator-memsize", *DL, *LibInfo);
2739 std::optional<CallLowering::PtrAuthInfo> PAI;
2744 const Value *
Key = Bundle->Inputs[0];
2751 if (!CalleeCPA || !
isa<Function>(CalleeCPA->getPointer()) ||
2752 !CalleeCPA->isKnownCompatibleWith(
Key, Discriminator, *DL)) {
2754 Register DiscReg = getOrCreateVReg(*Discriminator);
2762 const auto &Token = *Bundle->Inputs[0].get();
2763 ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2769 bool Success = CLI->lowerCall(
2770 MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
2775 assert(!HasTailCall &&
"Can't tail call return twice from block?");
2776 const TargetInstrInfo *
TII = MF->getSubtarget().getInstrInfo();
2792 if (
F && (
F->hasDLLImportStorageClass() ||
2793 (MF->getTarget().getTargetTriple().isOSWindows() &&
2794 F->hasExternalWeakLinkage())))
2806 return translateInlineAsm(CI, MIRBuilder);
2810 if (translateCallBase(CI, MIRBuilder)) {
2819 if (translateKnownIntrinsic(CI,
ID, MIRBuilder))
2822 TargetLowering::IntrinsicInfo
Info;
2823 bool IsTgtMemIntrinsic = TLI->getTgtMemIntrinsic(Info, CI, *MF,
ID);
2825 return translateIntrinsic(CI,
ID, MIRBuilder,
2826 IsTgtMemIntrinsic ? &Info :
nullptr);
2833bool IRTranslator::translateIntrinsic(
2838 ResultRegs = getOrCreateVRegs(CB);
2853 assert(CI->getBitWidth() <= 64 &&
2854 "large intrinsic immediates not handled");
2855 MIB.
addImm(CI->getSExtValue());
2860 auto *MD = MDVal->getMetadata();
2864 MDN =
MDNode::get(MF->getFunction().getContext(), ConstMD);
2871 if (VRegs.
size() > 1)
2878 if (TgtMemIntrinsicInfo) {
2881 Align Alignment = TgtMemIntrinsicInfo->
align.value_or(DL->getABITypeAlign(
2886 : LLT::scalar(TgtMemIntrinsicInfo->memVT.getStoreSizeInBits());
2890 MachinePointerInfo MPI;
2891 if (TgtMemIntrinsicInfo->
ptrVal) {
2892 MPI = MachinePointerInfo(TgtMemIntrinsicInfo->ptrVal,
2893 TgtMemIntrinsicInfo->offset);
2895 MPI = MachinePointerInfo(*TgtMemIntrinsicInfo->fallbackAddressSpace);
2899 nullptr, TgtMemIntrinsicInfo->
ssid,
2905 auto *Token = Bundle->Inputs[0].get();
2906 Register TokenReg = getOrCreateVReg(*Token);
2917bool IRTranslator::findUnwindDestinations(
2939 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2945 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2946 UnwindDests.back().first->setIsEHScopeEntry();
2947 UnwindDests.back().first->setIsEHFuncletEntry();
2952 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2953 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2955 if (IsMSVCCXX || IsCoreCLR)
2956 UnwindDests.back().first->setIsEHFuncletEntry();
2958 UnwindDests.back().first->setIsEHScopeEntry();
2960 NewEHPadBB = CatchSwitch->getUnwindDest();
2965 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2966 if (BPI && NewEHPadBB)
2968 EHPadBB = NewEHPadBB;
2973bool IRTranslator::translateInvoke(
const User &U,
2976 MCContext &
Context = MF->getContext();
2981 const Function *Fn =
I.getCalledFunction();
2988 if (
I.hasDeoptState())
3002 (MF->getTarget().getTargetTriple().isOSWindows() &&
3006 bool LowerInlineAsm =
I.isInlineAsm();
3007 bool NeedEHLabel =
true;
3013 MIRBuilder.
buildInstr(TargetOpcode::G_INVOKE_REGION_START);
3014 BeginSymbol =
Context.createTempSymbol();
3018 if (LowerInlineAsm) {
3019 if (!translateInlineAsm(
I, MIRBuilder))
3021 }
else if (!translateCallBase(
I, MIRBuilder))
3026 EndSymbol =
Context.createTempSymbol();
3031 BranchProbabilityInfo *BPI = FuncInfo.BPI;
3032 MachineBasicBlock *InvokeMBB = &MIRBuilder.
getMBB();
3033 BranchProbability EHPadBBProb =
3037 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
3040 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
3041 &ReturnMBB = getMBB(*ReturnBB);
3043 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
3044 for (
auto &UnwindDest : UnwindDests) {
3045 UnwindDest.first->setIsEHPad();
3046 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3051 assert(BeginSymbol &&
"Expected a begin symbol!");
3052 assert(EndSymbol &&
"Expected an end symbol!");
3053 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
3056 MIRBuilder.
buildBr(ReturnMBB);
3062bool IRTranslator::translateCallBr(
const User &U,
3068 MachineBasicBlock *CallBrMBB = &MIRBuilder.
getMBB();
3071 if (
I.isInlineAsm()) {
3077 if (!translateIntrinsic(
I, IID, MIRBuilder))
3081 SmallPtrSet<BasicBlock *, 8> Dests = {
I.getDefaultDest()};
3082 MachineBasicBlock *
Return = &getMBB(*
I.getDefaultDest());
3091 for (BasicBlock *Dest :
I.getIndirectDests()) {
3092 MachineBasicBlock &
Target = getMBB(*Dest);
3093 Target.setIsInlineAsmBrIndirectTarget();
3094 Target.setLabelMustBeEmitted();
3096 if (Dests.
insert(Dest).second)
3108bool IRTranslator::translateLandingPad(
const User &U,
3112 MachineBasicBlock &
MBB = MIRBuilder.
getMBB();
3118 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
3119 if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
3120 TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
3132 MIRBuilder.
buildInstr(TargetOpcode::EH_LABEL)
3137 const TargetRegisterInfo &
TRI = *MF->getSubtarget().getRegisterInfo();
3138 if (
auto *RegMask =
TRI.getCustomEHPadPreservedMask(*MF))
3139 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
3148 assert(Tys.
size() == 2 &&
"Only two-valued landingpads are supported");
3151 Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
3157 MIRBuilder.
buildCopy(ResRegs[0], ExceptionReg);
3159 Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
3164 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
3165 MIRBuilder.
buildCopy(PtrVReg, SelectorReg);
3166 MIRBuilder.
buildCast(ResRegs[1], PtrVReg);
3171bool IRTranslator::translateAlloca(
const User &U,
3179 Register Res = getOrCreateVReg(AI);
3180 int FI = getOrCreateFrameIndex(AI);
3186 if (MF->getTarget().getTargetTriple().isOSWindows())
3191 Type *IntPtrIRTy = DL->getIntPtrType(AI.
getType());
3193 if (MRI->getType(NumElts) != IntPtrTy) {
3194 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
3201 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
3203 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
3204 MIRBuilder.
buildMul(AllocSize, NumElts, TySize);
3209 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
3211 auto AllocAdd = MIRBuilder.
buildAdd(IntPtrTy, AllocSize, SAMinusOne,
3215 auto AlignedAlloc = MIRBuilder.
buildAnd(IntPtrTy, AllocAdd, AlignCst);
3218 if (Alignment <= StackAlign)
3219 Alignment =
Align(1);
3222 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
3223 assert(MF->getFrameInfo().hasVarSizedObjects());
3232 MIRBuilder.
buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
3233 {getOrCreateVReg(*
U.getOperand(0)),
3234 DL->getABITypeAlign(
U.getType()).value()});
3238bool IRTranslator::translateUnreachable(
const User &U,
3241 if (!UI.shouldLowerToTrap(MF->getTarget().Options.TrapUnreachable,
3242 MF->getTarget().Options.NoTrapAfterNoreturn))
3249bool IRTranslator::translateInsertElement(
const User &U,
3254 FVT && FVT->getNumElements() == 1)
3255 return translateCopy(U, *
U.getOperand(1), MIRBuilder);
3258 Register Val = getOrCreateVReg(*
U.getOperand(0));
3259 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3260 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3263 if (CI->getBitWidth() != PreferredVecIdxWidth) {
3264 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
3265 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3266 Idx = getOrCreateVReg(*NewIdxCI);
3270 Idx = getOrCreateVReg(*
U.getOperand(2));
3271 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3272 const LLT VecIdxTy =
LLT::scalar(PreferredVecIdxWidth);
3279bool IRTranslator::translateInsertVector(
const User &U,
3282 Register Vec = getOrCreateVReg(*
U.getOperand(0));
3283 Register Elt = getOrCreateVReg(*
U.getOperand(1));
3286 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3291 CI = ConstantInt::get(CI->
getContext(), NewIdx);
3296 ResultType && ResultType->getNumElements() == 1) {
3298 InputType && InputType->getNumElements() == 1) {
3302 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3308 Register Idx = getOrCreateVReg(*CI);
3316 Register Idx = getOrCreateVReg(*CI);
3317 auto ScaledIndex = MIRBuilder.
buildMul(
3318 VecIdxTy, MIRBuilder.
buildVScale(VecIdxTy, 1), Idx);
3325 getOrCreateVReg(U), getOrCreateVReg(*
U.getOperand(0)),
3330bool IRTranslator::translateExtractElement(
const User &U,
3334 if (
const FixedVectorType *FVT =
3336 if (FVT->getNumElements() == 1)
3337 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3340 Register Val = getOrCreateVReg(*
U.getOperand(0));
3341 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3346 auto *NewIdxCI = ConstantInt::get(CI->
getContext(), NewIdx);
3347 Idx = getOrCreateVReg(*NewIdxCI);
3351 Idx = getOrCreateVReg(*
U.getOperand(1));
3352 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
3353 const LLT VecIdxTy =
LLT::scalar(PreferredVecIdxWidth);
3360bool IRTranslator::translateExtractVector(
const User &U,
3363 Register Vec = getOrCreateVReg(*
U.getOperand(0));
3365 unsigned PreferredVecIdxWidth = TLI->getVectorIdxWidth(*DL);
3370 CI = ConstantInt::get(CI->
getContext(), NewIdx);
3375 ResultType && ResultType->getNumElements() == 1) {
3377 InputType && InputType->getNumElements() == 1) {
3380 return translateCopy(U, *
U.getOperand(0), MIRBuilder);
3386 Register Idx = getOrCreateVReg(*CI);
3394 Register Idx = getOrCreateVReg(*CI);
3395 auto ScaledIndex = MIRBuilder.
buildMul(
3396 VecIdxTy, MIRBuilder.
buildVScale(VecIdxTy, 1), Idx);
3403 getOrCreateVReg(*
U.getOperand(0)),
3408bool IRTranslator::translateShuffleVector(
const User &U,
3414 if (
U.getOperand(0)->getType()->isScalableTy()) {
3415 Register Val = getOrCreateVReg(*
U.getOperand(0));
3417 MRI->getType(Val).getElementType(), Val, 0);
3424 Mask = SVI->getShuffleMask();
3435 unsigned M =
Mask[0];
3437 if (M == 0 || M == 1)
3438 return translateCopy(U, *
U.getOperand(M), MIRBuilder);
3444 Dst, getOrCreateVReg(*
U.getOperand(0)), M);
3445 }
else if (M < SrcElts * 2) {
3447 Dst, getOrCreateVReg(*
U.getOperand(1)), M - SrcElts);
3459 for (
int M : Mask) {
3461 if (M == 0 || M == 1) {
3462 Ops.push_back(getOrCreateVReg(*
U.getOperand(M)));
3464 if (!
Undef.isValid()) {
3465 Undef = MRI->createGenericVirtualRegister(SrcTy);
3475 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
3477 .
buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
3478 {getOrCreateVReg(*
U.getOperand(0)),
3479 getOrCreateVReg(*
U.getOperand(1))})
3480 .addShuffleMask(MaskAlloc);
3487 SmallVector<MachineInstr *, 4> Insts;
3488 for (
auto Reg : getOrCreateVRegs(PI)) {
3489 auto MIB = MIRBuilder.
buildInstr(TargetOpcode::G_PHI, {
Reg}, {});
3493 PendingPHIs.emplace_back(&PI, std::move(Insts));
3497bool IRTranslator::translateAtomicCmpXchg(
const User &U,
3501 auto Flags = TLI->getAtomicMemOperandFlags(
I, *DL);
3503 auto Res = getOrCreateVRegs(
I);
3506 Register Addr = getOrCreateVReg(*
I.getPointerOperand());
3507 Register Cmp = getOrCreateVReg(*
I.getCompareOperand());
3508 Register NewVal = getOrCreateVReg(*
I.getNewValOperand());
3511 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3512 *MF->getMachineMemOperand(
3513 MachinePointerInfo(
I.getPointerOperand()), Flags, MRI->getType(Cmp),
3514 getMemOpAlign(
I),
I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3515 I.getSuccessOrdering(),
I.getFailureOrdering()));
3519bool IRTranslator::translateAtomicRMW(
const User &U,
3525 auto Flags = TLI->getAtomicMemOperandFlags(
I, *DL);
3528 Register Addr = getOrCreateVReg(*
I.getPointerOperand());
3529 Register Val = getOrCreateVReg(*
I.getValOperand());
3531 unsigned Opcode = 0;
3532 switch (
I.getOperation()) {
3536 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3539 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3542 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3545 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3548 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3551 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3554 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3557 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3560 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3563 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3566 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3569 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3572 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3575 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3578 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3581 Opcode = TargetOpcode::G_ATOMICRMW_FMAXIMUM;
3584 Opcode = TargetOpcode::G_ATOMICRMW_FMINIMUM;
3587 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3590 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3593 Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
3596 Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
3601 Opcode, Res, Addr, Val,
3602 *MF->getMachineMemOperand(MachinePointerInfo(
I.getPointerOperand()),
3603 Flags, MRI->getType(Val), getMemOpAlign(
I),
3604 I.getAAMetadata(),
nullptr,
I.getSyncScopeID(),
3609bool IRTranslator::translateFence(
const User &U,
3617bool IRTranslator::translateFreeze(
const User &U,
3623 "Freeze with different source and destination type?");
3625 for (
unsigned I = 0;
I < DstRegs.
size(); ++
I) {
3632void IRTranslator::finishPendingPhis() {
3635 GISelObserverWrapper WrapperObserver(&
Verifier);
3636 RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
3638 for (
auto &Phi : PendingPHIs) {
3639 const PHINode *PI =
Phi.first;
3643 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3649 SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
3653 for (
auto *Pred : getMachinePredBBs({IRPred, PI->
getParent()})) {
3657 for (
unsigned j = 0;
j < ValRegs.
size(); ++
j) {
3658 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3667void IRTranslator::translateDbgValueRecord(
Value *V,
bool HasArgList,
3673 "Expected inlined-at fields to agree");
3677 if (!V || HasArgList) {
3695 auto *ExprDerefRemoved =
3701 if (translateIfEntryValueArgument(
false, V, Variable, Expression, DL,
3713void IRTranslator::translateDbgDeclareRecord(
Value *
Address,
bool HasArgList,
3719 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << *Variable <<
"\n");
3724 "Expected inlined-at fields to agree");
3729 MF->setVariableDbgInfo(Variable, Expression,
3730 getOrCreateFrameIndex(*AI), DL);
3734 if (translateIfEntryValueArgument(
true,
Address, Variable,
3746void IRTranslator::translateDbgInfo(
const Instruction &Inst,
3751 assert(DLR->getLabel() &&
"Missing label");
3752 assert(DLR->getLabel()->isValidLocationForIntrinsic(
3754 "Expected inlined-at fields to agree");
3763 translateDbgDeclareRecord(V, DVR.
hasArgList(), Variable, Expression,
3766 translateDbgValueRecord(V, DVR.
hasArgList(), Variable, Expression,
3771bool IRTranslator::translate(
const Instruction &Inst) {
3773 CurBuilder->setPCSections(Inst.
getMetadata(LLVMContext::MD_pcsections));
3774 CurBuilder->setMMRAMetadata(Inst.
getMetadata(LLVMContext::MD_mmra));
3776 if (TLI->fallBackToDAGISel(Inst))
3780#define HANDLE_INST(NUM, OPCODE, CLASS) \
3781 case Instruction::OPCODE: \
3782 return translate##OPCODE(Inst, *CurBuilder.get());
3783#include "llvm/IR/Instruction.def"
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  // ConstantInt:
    EntryBuilder->buildConstant(Reg, *CI);
  // ConstantFP:
    CF = ConstantFP::get(CF->getContext(), CF->getValue());
    EntryBuilder->buildFConstant(Reg, *CF);
  // UndefValue:
    EntryBuilder->buildUndef(Reg);
  // ConstantPointerNull:
    EntryBuilder->buildConstant(Reg, 0);
  // GlobalValue:
    EntryBuilder->buildGlobalValue(Reg, GV);
  // ConstantPtrAuth:
    Register Addr = getOrCreateVReg(*CPA->getPointer());
    Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
    EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
  // ConstantAggregateZero:
    Constant &Elt = *CAZ->getElementValue(0u);
      EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
      return translateCopy(C, Elt, *EntryBuilder);
    EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
  // ConstantDataVector:
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  // ConstantExpr:
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    }
  // ConstantVector:
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  // BlockAddress:
    EntryBuilder->buildBlockAddress(Reg, BA);
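All of the constant cases above funnel into the entry-block builder. As one concrete illustration, a ConstantInt becomes a single G_CONSTANT in a fresh scalar vreg. The sketch below is illustrative only; `materializeConstInt` is a hypothetical helper, and the header paths assume a recent LLVM tree:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/Constants.h"

// Hypothetical helper: materialize an IR ConstantInt as a G_CONSTANT.
static llvm::Register materializeConstInt(llvm::MachineIRBuilder &EntryBuilder,
                                          llvm::MachineRegisterInfo &MRI,
                                          const llvm::ConstantInt &CI) {
  // One scalar vreg whose width matches the IR integer type.
  llvm::Register Reg =
      MRI.createGenericVirtualRegister(llvm::LLT::scalar(CI.getBitWidth()));
  EntryBuilder.buildConstant(Reg, CI);
  return Reg;
}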
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        NextMBB = BTB.Default;
      } else {
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          BTB.Parent);
        BTB.Cases.pop_back();
      }
    }

    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
  if (SP.shouldEmitSDCheck(BB)) {
    bool FunctionBasedInstrumentation =
        TLI->getSSPStackGuardCheck(*MF->getFunction().getParent(), *Libcalls);
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }
    SPDescriptor.resetPerBBState();
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());

  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));

  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  CurBuilder->buildLoad(PtrMemTy, StackSlotPtr,

  if (TLI->useStackGuardXorFP()) {
    LLVM_DEBUG(
        dbgs() << "Stack protector xor'ing with FP not yet implemented");
  }

  if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M, *Libcalls)) {
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();

    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
  }

  getStackGuard(Guard, *CurBuilder);

  const Value *IRGuard = TLI->getSDagStackGuard(M, *Libcalls);
  Register GuardPtr = getOrCreateVReg(*IRGuard);
  CurBuilder->buildLoad(PtrMemTy, GuardPtr,
  const RTLIB::LibcallImpl LibcallImpl =
      Libcalls->getLibcallImpl(RTLIB::STACKPROTECTOR_CHECK_FAIL);
  if (LibcallImpl == RTLIB::Unsupported)
    return false;

  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

  CallLowering::CallLoweringInfo Info;
  Info.CallConv = Libcalls->getLibcallImplCallingConv(LibcallImpl);

  StringRef LibcallName =

  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
  CurBuilder->buildInstr(TargetOpcode::G_TRAP);
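Once the failure libcall has been lowered, targets that request it get a trailing trap, which is just a bare G_TRAP. Minimal sketch; `emitPostCheckTrap` is a hypothetical helper name:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

// Hypothetical helper: terminate the stack-protector failure block with G_TRAP.
static void emitPostCheckTrap(llvm::MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildTrap();
}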
void IRTranslator::finalizeFunction() {
  PendingPHIs.clear();
  FrameIndices.clear();
  MachinePreds.clear();
  EntryBuilder.reset();
  SPDescriptor.resetPerFunctionState();
  return CI && CI->isMustTailCall();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  CLI = MF->getSubtarget().getCallLowering();

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *ORE, R);
    return false;
  }

  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();
  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }

  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getDataLayout();
  FuncInfo.BPI = nullptr;
  *F.getParent(), Subtarget);
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(*TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *ORE, R);
    return false;
  }

  EntryBuilder->setMBB(*EntryBB);
  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHIIt()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
    MBB = MF->CreateMachineBasicBlock(&BB);
    if (!BA->hasZeroLiveUses())
    if (!HasMustTailInVarArgFn)
  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  EntryBB->addSuccessor(&getMBB(F.front()));
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue;
    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *ORE, R);
    return false;
  }
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  DILocationVerifier Verifier;

    CurBuilder->setMBB(MBB);
    HasTailCall = false;

      Verifier.setCurrentInst(&Inst);
      translateDbgInfo(Inst, *CurBuilder);

      if (translate(Inst))
        continue;

      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 Inst.getDebugLoc(), BB);
      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

      if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
        std::string InstStrStorage;
        R << ": '" << InstStrStorage << "'";
      }

    if (!finalizeBasicBlock(*BB, MBB)) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 BB->getTerminator()->getDebugLoc(), BB);
      R << "unable to translate basic block";
      reportTranslationError(*MF, *ORE, R);
      return false;
    }
  finishPendingPhis();

  SwiftError.propagateVRegs();

  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  SP.copyToMachineFrameInfo(MF->getFrameInfo());