using namespace llvm::gvn;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"
STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached gvn-max-block-speculations cut-off "
          "preventing further exploration");
static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

static cl::opt<uint32_t> MaxNumVisitedInsts(
    "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
    cl::desc("Max number of visited instructions when trying to find "
             "dominating value of select dependency (default = 100)"));
  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val);
  }
  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val);
  }
  SelectInst *getSelectValue() const {
    assert(isSelectValue() && "Wrong accessor");
    return cast<SelectInst>(Val);
  }
    Res.AV = std::move(AV);
Expression GVNPass::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
    // gc.relocate is 'special': its second and third operands are not real
    // values but indices into the statepoint's argument list; use the
    // referred-to values for purposes of identity.
    e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
    e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
    e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
  } else {
    for (Use &Op : I->operands())
      e.varargs.push_back(lookupOrAdd(Op));
  }
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand
    // value numbers.
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
    e.varargs.append(E->idx_begin(), E->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
  }

  return e;
}
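// Worked example of the canonicalization above: `add i32 %a, %b` and
// `add i32 %b, %a` produce identical Expressions (sorted operand value
// numbers), as do `icmp slt i32 %a, %b` and `icmp sgt i32 %b, %a` (operands
// sorted, predicate swapped), so each pair shares a single value number.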
Expression GVNPass::ValueTable::createCmpExpr(unsigned Opcode,
                                              CmpInst::Predicate Predicate,
                                              Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}
Expression GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // An extract of field 0 from a with.overflow intrinsic is numbered as the
    // equivalent plain binary operation on the intrinsic's operands.
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognised intrinsic: number it as a plain extractvalue expression.
  e.opcode = EI->getOpcode();
  for (Use &Op : EI->operands())
    e.varargs.push_back(lookupOrAdd(Op));
  e.varargs.append(EI->idx_begin(), EI->idx_end());
  return e;
}
Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
  Expression E;
  Type *PtrTy = GEP->getType()->getScalarType();
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
  MapVector<Value *, APInt> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (PtrTy->isOpaquePointerTy() &&
      GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
    // Convert into an offset representation so equivalent address
    // computations over different source element types get the same number.
    E.opcode = GEP->getOpcode();
    E.type = nullptr;
    E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
    for (const auto &Pair : VariableOffsets) {
      E.varargs.push_back(lookupOrAdd(Pair.first));
      E.varargs.push_back(
          lookupOrAdd(ConstantInt::get(GEP->getContext(), Pair.second)));
    }
    if (!ConstantOffset.isZero())
      E.varargs.push_back(
          lookupOrAdd(ConstantInt::get(GEP->getContext(), ConstantOffset)));
  } else {
    // Offset representation is not possible; number it as a typed GEP over
    // its operands.
    E.opcode = GEP->getOpcode();
    E.type = GEP->getSourceElementType();
    for (Use &Op : GEP->operands())
      E.varargs.push_back(lookupOrAdd(Op));
  }
  return E;
}
GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
/// add - Insert a value into the table with a specified value number.
void GVNPass::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}
uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
  // Calls in a pre-split coroutine may observe the thread id, which can
  // change when the coroutine resumes on another thread; do not merge them.
  if (C->getFunction()->isPresplitCoroutine()) {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }

  // Do not combine convergent calls: they implicitly depend on the set of
  // threads currently executing, which may differ between basic blocks.
  if (C->isConvergent()) {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }

  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  }

  if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      // For masked load/store intrinsics, the local dependency may actually
      // be an ordinary load or store instruction.
      CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());

      if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case: look for a single dominating call identical to C.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    CallInst *cdep = nullptr;
    for (const NonLocalDepEntry &I : deps) {
      if (I.getResult().isNonLocal())
        continue;

      // We don't handle non-definitions.  If we already have a call, reject
      // instruction dependencies.
      if (!I.getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
      if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep || cdep->arg_size() != C->arg_size()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  }

  valueNumbering[C] = nextValueNumber;
  return nextValueNumber++;
}
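// Summary of the call-numbering policy above: calls that provably do not
// access memory are numbered structurally, like any other expression.
// Read-only calls additionally consult MemoryDependenceResults, so two
// identical calls only share a value number when no intervening write could
// have changed the memory they read; everything else gets a fresh number.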
/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}
/// lookupOrAdd - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value *, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  auto *I = dyn_cast<Instruction>(V);
  if (!I) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }
  Expression exp;
  switch (I->getOpcode()) {
  case Instruction::Call:
    return lookupOrAddCall(cast<CallInst>(I));
  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  case Instruction::Select:
  case Instruction::Freeze:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ShuffleVector:
  case Instruction::InsertValue:
    exp = createExpr(I);
    break;
  case Instruction::GetElementPtr:
    exp = createGEPExpr(cast<GetElementPtrInst>(I));
    break;
  case Instruction::ExtractValue:
    exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
    break;
  case Instruction::PHI:
    valueNumbering[V] = nextValueNumber;
    NumberingPhi[nextValueNumber] = cast<PHINode>(V);
    return nextValueNumber++;
  default:
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}
/// Returns the value number of the specified value.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value *, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}
/// Returns the value number of the given comparison, assigning it a new
/// number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}
/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}
/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}
/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value *, uint32_t>::const_iterator
           I = valueNumbering.begin(),
           E = valueNumbering.end();
       I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}
PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  if (LI)
    PA.preserve<LoopAnalysis>();
  return PA;
}
void GVNPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  if (Options.AllowPRE != std::nullopt)
    OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != std::nullopt)
    OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != std::nullopt)
    OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != std::nullopt)
    OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
  OS << '>';
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (auto &I : d) {
    errs() << I.first << "\n";
    I.second->dump();
  }
  errs() << "}\n";
}
#endif
/// Return true if we can prove that the value we're analyzing is fully
/// available in the specified block.  When we can't decide for a block, we
/// optimistically speculate that it is available and fix the state up later
/// if that turns out to be wrong.
static bool IsValueFullyAvailableInBlock(
    BasicBlock *BB,
    DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
  SmallVector<BasicBlock *, 32> Worklist;
  std::optional<BasicBlock *> UnavailableBB;

  // The number of times we didn't find an entry for a block in the map and
  // optimistically inserted one marking it speculatively available.
  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

#ifndef NDEBUG
  SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
  SmallVector<BasicBlock *, 32> AvailableBBs;
#endif

  Worklist.emplace_back(BB);
  while (!Worklist.empty()) {
    BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
    // Optimistically assume that the block is speculatively available and
    // check to see if we already know about this block in one lookup.
    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
        FullyAvailableBlocks.try_emplace(
            CurrBB, AvailabilityState::SpeculativelyAvailable);
    AvailabilityState &State = IV.first->second;

    // Did the entry already exist for this block?
    if (!IV.second) {
      if (State == AvailabilityState::Unavailable) {
        UnavailableBB = CurrBB;
        break; // Backpropagate unavailability info.
      }
#ifndef NDEBUG
      AvailableBBs.emplace_back(CurrBB);
#endif
      continue; // Don't recurse further, but continue processing worklist.
    }

    // No entry found for this block.
    ++NumNewNewSpeculativelyAvailableBBs;
    bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;

    // If we have exhausted our budget, mark this block as unavailable.  Also,
    // if this block has no predecessors, the value isn't live-in here.
    if (OutOfBudget || pred_empty(CurrBB)) {
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
      State = AvailabilityState::Unavailable;
      UnavailableBB = CurrBB;
      break; // Backpropagate unavailability info.
    }

    // Tentatively consider this block as speculatively available.
#ifndef NDEBUG
    NewSpeculativelyAvailableBBs.insert(CurrBB);
#endif
    // And further recurse into the block's predecessors.
    append_range(Worklist, predecessors(CurrBB));
  }

  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);

  // If a block isn't a fixpoint yet (only Unavailable and Available are
  // fixpoints), mark it as one and propagate the state to its successors.
  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())
          return; // Never queried this block, leave as-is.
        switch (AvailabilityState &State = It->second; State) {
        case AvailabilityState::Unavailable:
        case AvailabilityState::Available:
          return; // Don't backpropagate further, continue with fixpoint.
        case AvailabilityState::SpeculativelyAvailable: // Fix it!
          State = FixpointState;
#ifndef NDEBUG
          assert(NewSpeculativelyAvailableBBs.erase(BB) &&
                 "Found a speculatively available successor leftover?");
#endif
          // Queue successors for further processing.
          append_range(Worklist, successors(BB));
          return;
        }
      };

  if (UnavailableBB) {
    // We encountered an unavailable block; mark the speculatively available
    // blocks reachable from it as unavailable as well.
    Worklist.clear();
    append_range(Worklist, successors(*UnavailableBB));
    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);
  }

#ifndef NDEBUG
  Worklist.clear();
  for (BasicBlock *AvailableBB : AvailableBBs)
    append_range(Worklist, successors(AvailableBB));
  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

  assert(NewSpeculativelyAvailableBBs.empty() &&
         "Must have fixed all the new speculatively available blocks.");
#endif

  return !UnavailableBB;
}
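// Intuition for the walk above: starting from the query block we walk
// predecessors, optimistically marking unknown blocks SpeculativelyAvailable.
// Hitting an Unavailable block (or exhausting the speculation budget, or
// reaching a block with no predecessors) pins a fixpoint, which the lambda
// then propagates forward so no block is left stuck in the speculative state.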
/// Given a set of loads specified by ValuesPerBlock, construct SSA form,
/// allowing us to eliminate Load.
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVNPass &gvn) {
  // Check for the fully redundant, dominating load case.  In this case, we
  // can just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(Load->getType(), Load->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (AV.AV.isUndefValue())
      continue;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load being eliminated and it is available in its
    // own block, don't add it: SSAUpdater will resolve the value to the
    // relevant phi, possibly avoiding phi construction entirely.
    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
}
Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
                                                Instruction *InsertPt,
                                                GVNPass &gvn) const {
  Value *Res;
  Type *LoadTy = Load->getType();
  const DataLayout &DL = Load->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << "  " << *getSimpleValue() << '\n'
                        << *Res << '\n');
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *CoercedLoad = getCoercedLoadValue();
    if (CoercedLoad->getType() == LoadTy && Offset == 0) {
      Res = CoercedLoad;
    } else {
      Res = getValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
      // We are adding a new user for this load; the original metadata may not
      // hold for it, and the new load may have a different size and type not
      // fully represented by the dereferenceable and noundef metadata, so
      // kill that metadata.  Invariant metadata can still hold.
      if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
        CoercedLoad->dropUnknownNonDebugMetadata(
            {LLVMContext::MD_dereferenceable,
             LLVMContext::MD_dereferenceable_or_null,
             LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << "  " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n');
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, InsertPt,
                                 DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << "  " << *getMemIntrinValue() << '\n'
                      << *Res << '\n');
  } else if (isSelectValue()) {
    // Introduce a new value select for a load from an eligible pointer select.
    SelectInst *Sel = getSelectValue();
    assert(V1 && V2 && "both value operands of the select must be present");
    Res = SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel);
  } else {
    llvm_unreachable("Should not materialize value from dead block");
  }
  assert(Res && "failed to materialize?");
  return Res;
}
static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  Instruction *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : Load->getPointerOperand()->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
      auto *I = cast<Instruction>(U);
      if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
        // Use the most immediately dominating instruction, if any.
        if (OtherAccess) {
          if (DT->dominates(OtherAccess, I))
            OtherAccess = I;
          else
            assert(U == OtherAccess || DT->dominates(I, OtherAccess));
        } else
          OtherAccess = I;
      }
    }
  }

  if (!OtherAccess) {
    // There is no dominating use; check for a non-dominating use that lies
    // between any other potentially available use and Load.
    for (auto *U : Load->getPointerOperand()->users()) {
      if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
        auto *I = cast<Instruction>(U);
        if (I->getFunction() == Load->getFunction() &&
            isPotentiallyReachable(I, Load, nullptr, DT)) {
          if (OtherAccess) {
            if (liesBetween(OtherAccess, I, Load, DT)) {
              OtherAccess = I;
            } else if (!liesBetween(I, OtherAccess, Load, DT)) {
              // Both uses are partially available at Load were it not for
              // the clobber, but neither lies strictly after the other.
              OtherAccess = nullptr;
              break;
            } // else: keep the current OtherAccess, which lies between U and
              // Load.
          } else {
            OtherAccess = I;
          }
        }
      }
    }
  }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}
static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
                                  Instruction *From, AAResults *AA) {
  BatchAAResults BatchAA(*AA);
  BasicBlock *FromBB = From->getParent();
  BasicBlock *BB = FromBB;
  uint32_t NumVisitedInsts = 0;
  while (BB) {
    // Walk backwards through the block, starting from From in its own block.
    for (auto I = BB == FromBB ? From->getReverseIterator() : BB->rbegin(),
              E = BB->rend();
         I != E; ++I) {
      // Stop the search if the limit is reached.
      if (++NumVisitedInsts > MaxNumVisitedInsts)
        return nullptr;
      Instruction *Inst = &*I;
      if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
        return nullptr;
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
          return LI;
    }
    BB = BB->getSinglePredecessor();
  }
  return nullptr;
}
std::optional<AvailableValue>
GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                 Value *Address) {
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");
  Instruction *DepInst = DepInfo.getInst();
  const DataLayout &DL = Load->getModule()->getDataLayout();

  if (DepInfo.isClobber()) {
    // A store that writes a superset of the bits read by the load: extract
    // the bits we need from the stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating the model.
      if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
        if (Offset != -1)
          return AvailableValue::get(DepSI->getValueOperand(), Offset);
      }
    }

    // A wider load covering this one: extract from the earlier loaded value.
    if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
      if (DepLoad != Load && Address &&
          Load->isAtomic() <= DepLoad->isAtomic()) {
        int Offset = -1;
        if (canCoerceMustAliasedValueToLoad(DepLoad, Load->getType(), DL)) {
          const auto ClobberOff = MD->getClobberOffset(DepLoad);
          // GVN has no deal with a negative offset.
          Offset =
              (ClobberOff == std::nullopt || *ClobberOff < 0) ? -1 : *ClobberOff;
        }
        if (Offset == -1)
          Offset =
              analyzeLoadFromClobberingLoad(Load->getType(), Address, DepLoad, DL);
        if (Offset != -1)
          return AvailableValue::getLoad(DepLoad, Offset);
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !Load->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1)
          return AvailableValue::getMI(DepMI, Offset);
      }
    }

    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
               dbgs() << " is clobbered by " << *DepInst << '\n';);
    return std::nullopt;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading an alloca or loading immediately after a lifetime begin yields
  // undef.
  if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
    return AvailableValue::get(UndefValue::get(Load->getType()));

  if (Constant *InitVal =
          getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
    return AvailableValue::get(InitVal);

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject a store of a different, non-coercible type.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
                                         DL))
      return std::nullopt;

    // Can't forward from non-atomic to atomic without violating the model.
    if (S->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::get(S->getValueOperand());
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
      return std::nullopt;

    if (LD->isAtomic() < Load->isAtomic())
      return std::nullopt;

    return AvailableValue::getLoad(LD);
  }

  // A load whose address depends on a select can become a select of the two
  // loaded values, provided a dominating value exists for each arm and no
  // instruction in between may clobber the loads.
  if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
    assert(Sel->getType() == Load->getPointerOperandType());
    auto Loc = MemoryLocation::get(Load);
    Value *V1 = findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
                                    Load->getType(), DepInst,
                                    getAliasAnalysis());
    if (!V1)
      return std::nullopt;
    Value *V2 = findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
                                    Load->getType(), DepInst,
                                    getAliasAnalysis());
    if (!V2)
      return std::nullopt;
    return AvailableValue::getSelect(Sel, V1, V2);
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
             dbgs() << " has unknown def " << *DepInst << '\n';);
  return std::nullopt;
}
void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available, and of dependencies that produce an
  // unknown value for the load.
  for (const auto &Dep : Deps) {
    BasicBlock *DepBB = Dep.getBB();
    MemDepResult DepInfo = Dep.getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op behaves as a load evaluating to undef.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isLocal()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address loaded in this non-local block may differ from the load's
    // pointer operand if PHI translation occurred; consider the right one.
    if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
      // Because this was a non-local dependency, it is safe to materialize
      // anywhere between the dependency and the end of its block.
      ValuesPerBlock.push_back(
          AvailableValueInBlock::get(DepBB, std::move(*AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}
void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad = new LoadInst(
        Load->getType(), LoadPtr, Load->getName() + ".pre", Load->isVolatile(),
        Load->getAlign(), Load->getOrdering(), Load->getSyncScopeID(),
        UnavailableBlock->getTerminator());
    NewLoad->setDebugLoc(Load->getDebugLoc());
    if (MSSAU) {
      // Get the defining access of the original load, or the load itself if
      // it is a MemoryDef (e.g. because it is volatile).  The inserted loads
      // are guaranteed to load from the same definition.
      auto *LoadAcc = MSSAU->getMemorySSA()->getMemoryAccess(Load);
      auto *DefiningAcc =
          isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
      auto *NewAccess = MSSAU->createMemoryAccessInBB(
          NewLoad, DefiningAcc, NewLoad->getParent(),
          MemorySSA::BeforeTerminator);
      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
        MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      else
        MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
    }

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI &&
          LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    // Add the newly created load.
    ValuesPerBlock.push_back(
        AvailableValueInBlock::get(UnavailableBlock, NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(Load);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(Load);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
  });
}
bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks) {
  // The value is available in some of our (transitive) predecessors.  We only
  // do PRE when we know we have to insert *one* load, which means we're
  // basically moving the load rather than growing code size.
  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Find the first block with more than one predecessor, walking backwards
  // through single-predecessor blocks from the load.
  BasicBlock *LoadBB = Load->getParent();
  BasicBlock *TmpBB = LoadBB;

  // If an instruction above the load may implicitly not transfer execution
  // to its successor, moving through it may be invalid and requires extra
  // safety checks.
  bool MustEnsureSafetyOfSpeculativeExecution =
      ICF->isDominatedByICFIFromSameBlock(Load);

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // A block with multiple successors means the edge we just walked was
    // critical; hoisting above it would add the load to paths along which it
    // was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    MustEnsureSafetyOfSpeculativeExecution =
        MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      LLVM_DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
                        << Pred->getName() << "': " << *Load << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks))
      continue;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                          << Pred->getName() << "': " << *Load << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        LLVM_DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
                          << Pred->getName() << "': " << *Load << '\n');
        return false;
      }

      // Do not split a backedge: that would break the canonical loop form.
      if (!isLoadPRESplitBackedgeEnabled())
        if (DT->dominates(LoadBB, Pred)) {
          LLVM_DEBUG(dbgs()
                     << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
                     << Pred->getName() << "': " << *Load << '\n');
          return false;
        }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");
  (void)NumUnavailablePreds;

  // If this load is unavailable in multiple predecessors, reject it.
  if (NumUnavailablePreds != 1)
    return false;

  // Make sure it is safe to speculatively execute the load where we are
  // about to insert it.
  if (MustEnsureSafetyOfSpeculativeExecution) {
    if (CriticalEdgePred.size())
      if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), AC, DT))
        return false;
    for (auto &PL : PredLoads)
      if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
                                        DT))
        return false;
  }

  // Split critical edges and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable
  // predecessors, PHI-translating the address along every skipped edge.
  bool CanDoPRE = true;
  const DataLayout &DL = Load->getModule()->getDataLayout();
  SmallVector<Instruction *, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    Value *LoadPtr = Load->getPointerOperand();
    BasicBlock *Cur = Load->getParent();
    while (Cur != LoadBB) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.translateWithInsertion(Cur, Cur->getSinglePredecessor(),
                                               *DT, NewInsts);
      if (!LoadPtr) {
        CanDoPRE = false;
        break;
      }
      Cur = Cur->getSinglePredecessor();
    }

    if (LoadPtr) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
                                               NewInsts);
    }
    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (!LoadPtr) {
      LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                        << *Load->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    // Don't revert the edge-splitting: a later transformation may also need
    // these critical edges split.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
  LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
             << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
             << '\n');

  // Assign value numbers to the new instructions, stripping debug locations
  // that would be misleading after hoisting.
  for (Instruction *I : NewInsts) {
    I->updateLocationAfterHoist();
    VN.lookupOrAdd(I);
  }

  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
  ++NumPRELoad;
  return true;
}
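// Illustrative before/after for load PRE (hand-written IR, simplified):
//   before:                            after:
//     then:                              then:
//       store i32 1, ptr %p                store i32 1, ptr %p
//       br label %merge                    br label %merge
//     else:                              else:
//       br label %merge                    %v.pre = load i32, ptr %p
//     merge:                               br label %merge
//       %v = load i32, ptr %p            merge:
//                                          %v = phi i32 [ 1, %then ],
//                                                       [ %v.pre, %else ]
// The value is fully available in %then (forwarded from the store) and only
// missing along the %else edge, so a single reload is inserted there.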
bool GVNPass::performLoopLoadPRE(LoadInst *Load,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks) {
  const Loop *L = LI ? LI->getLoopFor(Load->getParent()) : nullptr;
  // TODO: Generalize to other loop blocks that dominate the latch.
  if (!L || L->getHeader() != Load->getParent())
    return false;

  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *Latch = L->getLoopLatch();
  if (!Preheader || !Latch)
    return false;

  Value *LoadPtr = Load->getPointerOperand();
  // Must be available in the preheader.
  if (!L->isLoopInvariant(LoadPtr))
    return false;

  // To hoist the load to the preheader without introducing a new fault, the
  // unavailable blocks inside the loop must transfer execution to the latch.
  // For simplicity, only a single in-loop blocker is supported.
  BasicBlock *LoopBlock = nullptr;
  for (auto *Blocker : UnavailableBlocks) {
    // Blockers from outside the loop are handled in the preheader.
    if (!L->contains(Blocker))
      continue;

    // Only allow one loop block.
    if (LoopBlock)
      return false;

    // Do not sink into inner loops; this may be non-profitable.
    if (L != LI->getLoopFor(Blocker))
      return false;

    // Blocks dominating the latch execute on (almost) every iteration, so
    // PRE into them rarely pays off; only handle the colder blocks that do
    // not dominate the latch.
    if (DT->dominates(Blocker, Latch))
      return false;

    // Make sure that the terminator itself doesn't clobber.
    if (Blocker->getTerminator()->mayWriteToMemory())
      return false;

    LoopBlock = Blocker;
  }

  if (!LoopBlock)
    return false;

  // Make sure the memory at this pointer cannot be freed, so we can safely
  // reload from it after a clobber.
  if (LoadPtr->canBeFreed())
    return false;

  MapVector<BasicBlock *, Value *> AvailableLoads;
  AvailableLoads[LoopBlock] = LoadPtr;
  AvailableLoads[Preheader] = LoadPtr;

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
  ++NumPRELoopLoad;
  return true;
}
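// Illustrative loop case (hand-written): a load of a loop-invariant pointer
// in the header that is clobbered only in a conditionally executed loop block
// is reloaded in the preheader and right after the clobbering block, turning
// the header load into a phi and keeping the common path free of memory
// traffic.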
static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
           << "load of type " << NV("Type", Load->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}
/// Attempt to eliminate a load whose dependencies are non-local, i.e. defined
/// in some other block.
bool GVNPass::processNonLocalLoad(LoadInst *Load) {
  // Non-local speculations are not allowed under asan.
  if (Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(Load, Deps);

  // If we had to process too many blocks to find the dependencies, this load
  // isn't worth worrying about.
  unsigned NumDeps = Deps.size();
  if (NumDeps > MaxNumDeps)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is
  // a clobber in the current block.  Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
               dbgs() << " has unknown dependencies\n";);
    return false;
  }

  bool Changed = false;
  // If this load follows a GEP, see if we can PRE the indices before
  // analyzing.
  if (GetElementPtrInst *GEP =
          dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
    for (Use &U : GEP->indices())
      if (Instruction *I = dyn_cast<Instruction>(U.get()))
        Changed |= performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);

  // If no predecessor produces a known value for this load, exit early.
  if (ValuesPerBlock.empty())
    return Changed;

  // Step 3: Eliminate fully redundant loads.  If every dependency produces a
  // known value, the load is fully redundant and PHI construction computes
  // its value.
  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
    Load->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(Load);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // Propagating Load's DebugLoc is only correct if I is in the same block;
      // Load may not post-dominate I otherwise.
      if (Load->getDebugLoc() && Load->getParent() == I->getParent())
        I->setDebugLoc(Load->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(Load);
    ++NumGVNLoad;
    reportLoadElim(Load, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!isPREEnabled() || !isLoadPREEnabled())
    return Changed;
  if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
    return Changed;

  if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
      PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
    return true;

  return Changed;
}
1833 Cmp->getFastMathFlags().noNaNs())) {
1841 if (isa<ConstantFP>(
LHS) && !cast<ConstantFP>(
LHS)->
isZero())
1843 if (isa<ConstantFP>(
RHS) && !cast<ConstantFP>(
RHS)->
isZero())
1858 Cmp->getFastMathFlags().noNaNs()) ||
1867 if (isa<ConstantFP>(
LHS) && !cast<ConstantFP>(
LHS)->
isZero())
1869 if (isa<ConstantFP>(
RHS) && !cast<ConstantFP>(
RHS)->
isZero())
static bool hasUsersIn(Value *V, BasicBlock *BB) {
  return llvm::any_of(V->users(), [BB](User *U) {
    auto *I = dyn_cast<Instruction>(U);
    return I && I->getParent() == BB;
  });
}
bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      // assume(false): everything after it in the block is unreachable.
      // Insert a store of poison to mark this.
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      Type *PtrTy = Type::getInt8PtrTy(V->getContext());
      auto *NewS = new StoreInst(PoisonValue::get(Int8Ty),
                                 Constant::getNullValue(PtrTy), IntrinsicI);
      if (MSSAU) {
        // Thread the new MemoryDef into MemorySSA before the first access in
        // the block that does not come before NewS, or at the block's end.
        const MemoryUseOrDef *FirstNonDom = nullptr;
        const auto *AL =
            MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
        if (AL) {
          for (const auto &Acc : *AL) {
            if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
              if (!Current->getMemoryInst()->comesBefore(NewS)) {
                FirstNonDom = Current;
                break;
              }
          }
        }

        auto *NewDef =
            FirstNonDom ? MSSAU->createMemoryAccessBefore(
                              NewS, nullptr,
                              const_cast<MemoryUseOrDef *>(FirstNonDom))
                        : MSSAU->createMemoryAccessInBB(
                              NewS, nullptr, NewS->getParent(),
                              MemorySSA::BeforeTerminator);

        MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
      }
    }
    if (isAssumeWithEmptyBundle(*IntrinsicI)) {
      markInstructionForDeletion(IntrinsicI);
      return true;
    }
    return false;
  }

  if (isa<Constant>(V)) {
    // A non-false constant must evaluate to true, so this is assume(true)
    // and there is nothing more to do here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
    // The fact only holds in dominated successors; propagateEquality checks
    // dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // The assumed value itself can be replaced with true, covering cases like a
  // branch on %cmp right after `call void @llvm.assume(i1 %cmp)`.
  ReplaceOperandsWithMap[V] = True;

  // If the assume is an equality fact, canonicalize dominated uses in this
  // block to one of the two values.
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (impliesEquivalanceIfTrue(CmpI)) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      // Heuristically pick the better replacement; canonicalizing on *some*
      // replacement exposes other simplifications.
      if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
        std::swap(CmpLHS, CmpRHS);
      if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
        std::swap(CmpLHS, CmpRHS);
      if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
          (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
        // Move the 'oldest' value to the right-hand side, using the value
        // number as a proxy for age.
        uint32_t LVN = VN.lookupOrAdd(CmpLHS);
        uint32_t RVN = VN.lookupOrAdd(CmpRHS);
        if (LVN < RVN)
          std::swap(CmpLHS, CmpRHS);
      }

      // Degenerate case: both sides constant (dead path not yet pruned).
      if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
        return Changed;

      LLVM_DEBUG(dbgs() << "Replacing dominated uses of " << *CmpLHS
                        << " with " << *CmpRHS << " in block "
                        << IntrinsicI->getParent()->getName() << "\n");

      // Set up the replacement map; this handles uses within the same block.
      if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
        ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
    }
  }
  return Changed;
}
2027 I->replaceAllUsesWith(Repl);
/// Attempt to eliminate a load, first by eliminating it locally, and then by
/// attempting non-local elimination if that fails.
bool GVNPass::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below.
  if (!Dep.isLocal()) {
    // This might be a NonFuncLocal or an Unknown.
    LLVM_DEBUG(dbgs() << "GVN: load "; L->printAsOperand(dbgs());
               dbgs() << " has unknown dependence\n";);
    return false;
  }

  auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
  if (!AV)
    return false;

  Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);

  // Replace the load.
  patchAndReplaceAllUsesWith(L, AvailableValue);
  markInstructionForDeletion(L);
  if (MSSAU)
    MSSAU->removeMemoryAccess(L);
  ++NumGVNLoad;
  reportLoadElim(L, AvailableValue, ORE);
  // Tell MDA to re-examine the reused pointer since we might have more
  // information after forwarding it.
  if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(AvailableValue);
  return true;
}
std::pair<uint32_t, bool>
GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}
/// Return whether all the values related with the same value number are
/// defined in BB.
bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                         GVNPass &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}
/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVNPass &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}
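// The cache makes phi translation a single hash lookup per (value number,
// predecessor) pair; phiTranslateImpl below recomputes an expression's number
// as seen from Pred, e.g. resolving a phi to the number of its incoming value
// on that edge.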
bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
                                           const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           GVNPass &Gvn) {
  CallInst *Call = nullptr;
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals) {
    Call = dyn_cast<CallInst>(Vals->Val);
    if (Call && Call->getParent() == PhiBlock)
      break;
    Vals = Vals->Next;
  }

  if (AA->doesNotAccessMemory(Call))
    return true;

  if (!MD || !AA->onlyReadsMemory(Call))
    return false;

  MemDepResult local_dep = MD->getDependency(Call);
  if (!local_dep.isNonLocal())
    return false;

  const MemoryDependenceResults::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(Call);

  // Check to see if the Call has no function-local clobber.
  for (const NonLocalDepEntry &D : deps) {
    if (D.getResult().isNonFuncLocal())
      return true;
  }
  return false;
}
/// Translate value number Num using phis, so that it has the values of the
/// phis in BB.
uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                               const BasicBlock *PhiBlock,
                                               uint32_t Num, GVNPass &Gvn) {
  if (PHINode *PN = NumberingPhi[Num]) {
    for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
      if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
        if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
          return TransVal;
    }
    return Num;
  }

  // If any value related to Num is defined in a BB other than PhiBlock, it
  // cannot depend on a phi in PhiBlock without going through a backedge, so
  // exit early to save compile time.
  if (!areAllValsInBB(Num, PhiBlock, Gvn))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers; those should not be translated.
    if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
        (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
        (i > 1 && Exp.opcode == Instruction::ShuffleVector))
      continue;
    Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
  }

  if (Exp.commutative) {
    assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
    if (Exp.varargs[0] > Exp.varargs[1]) {
      std::swap(Exp.varargs[0], Exp.varargs[1]);
      uint32_t Opcode = Exp.opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.opcode & 255));
    }
  }

  if (uint32_t NewNum = expressionNumbering[Exp]) {
    if (Exp.opcode == Instruction::Call && NewNum != Num)
      return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
    return NewNum;
  }
  return Num;
}
/// Erase stale entries from the phiTranslate cache so that the translation
/// can be recomputed.
void GVNPass::ValueTable::eraseTranslateCacheEntry(
    uint32_t Num, const BasicBlock &CurrBlock) {
  for (const BasicBlock *Pred : predecessors(&CurrBlock))
    PhiTranslateTable.erase({Num, Pred});
}
/// In the midst of walking the dominator tree, return the leading value of
/// the given value number in the current block, if any.
Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val)
    return nullptr;

  Value *Val = nullptr;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val))
      return Val;
  }

  LeaderTableEntry *Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val))
        return Next->Val;
      if (!Val)
        Val = Next->Val;
    }
    Next = Next->Next;
  }

  return Val;
}
/// There is an edge from 'Src' to 'Dst'.  Return true if every path from the
/// entry block to 'Dst' passes via this edge.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // By the time GVN runs, all loops have preheaders, so a single-predecessor
  // check suffices in practice.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
}
void GVNPass::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
}
bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
  bool Changed = false;
  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
    Value *Operand = Instr->getOperand(OpNum);
    auto it = ReplaceOperandsWithMap.find(Operand);
    if (it != ReplaceOperandsWithMap.end()) {
      LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
                        << *it->second << " in instruction " << *Instr << '\n');
      Instr->setOperand(OpNum, it->second);
      Changed = true;
    }
  }
  return Changed;
}
/// The given values are known to be equal in every block dominated by 'Root'
/// (if 'DominatesByEdge' is true) or in every block reachable only via the
/// start of 'Root'.  Exploit this, for example by replacing 'LHS' with 'RHS'
/// everywhere in the scope.
bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
                                const BasicBlockEdge &Root,
                                bool DominatesByEdge) {
  SmallVector<std::pair<Value *, Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // The leader table only tracks basic blocks, not edges; we can only add an
  // entry in the simple case where the edge dominates the end.
  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value *, Value *> Item = Worklist.pop_back_val();
    LHS = Item.first;
    RHS = Item.second;

    if (LHS == RHS)
      continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS))
      continue;

    // Prefer a constant on the right-hand side, or an Argument if no
    // constants are present.
    if (isa<Constant>(RHS) || (isa<Argument>(RHS) && !isa<Constant>(LHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
           "Unexpected value!");

    // If there is no obvious reason to prefer one side, ensure the longest
    // lived term is on the right-hand side so the shortest lived term is
    // replaced by the longest lived; this tends to expose more
    // simplifications.
    uint32_t LVN = VN.lookupOrAdd(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value
      // number as a proxy for age.
      uint32_t RVN = VN.lookupOrAdd(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'; do not do this if
    // RHS is an instruction, to preserve the invariant that instructions only
    // occur in the leader table for their own value number.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.
    if (!LHS->hasOneUse()) {
      unsigned NumReplacements =
          DominatesByEdge
              ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
              : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());

      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;
      // Cached information for anything that uses LHS will be invalid.
      if (MD)
        MD->invalidateCachedPointerInfo(LHS);
    }

    // Now try to deduce additional equalities from this one. For example, if
    // "(A != B)" == "false" then "A == B" must be true.
    // Whether RHS equals 'true'.  Otherwise it equals 'false'.
    bool isKnownTrue =
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isMinusOne();
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true.  If
    // "A || B" is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
        (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B.  When propagating "(A >= B)" == "true",
    // replace all instances of "A < B" with "false".
    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // For floating point, be careful: equality does not always imply
      // equivalence.
      if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
          (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // Since we don't have the instruction "A < B" immediately to hand, work
      // out the value number it would have and look for an instruction
      // realizing it.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
      // If the number we were assigned is brand new then no instruction can
      // realize it.
      if (Num < NextNum) {
        Value *NotCmp = findLeader(Root.getEnd(), Num);
        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =
              DominatesByEdge
                  ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
                  : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
                                             Root.getStart());
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;
          if (MD)
            MD->invalidateCachedPointerInfo(NotCmp);
        }
      }
      // Ensure that any instruction in scope that gets the "A < B" value
      // number is replaced with false.
      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());
    }
  }

  return Changed;
}
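// Typical use (illustrative): after `%cmp = icmp eq i32 %a, %b` guards a
// branch, processInstruction below calls propagateEquality with (%cmp, true)
// on the taken edge, which also enqueues (%a, %b); uses of the shorter-lived
// value dominated by that edge are then rewritten to the longer-lived one.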
2444 if (isa<DbgInfoIntrinsic>(
I))
2453 bool Changed =
false;
2454 if (!
I->use_empty()) {
2458 I->replaceAllUsesWith(V);
2466 if (MD &&
V->getType()->isPtrOrPtrVectorTy())
2473 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
2474 return processAssumeIntrinsic(Assume);
2476 if (
LoadInst *Load = dyn_cast<LoadInst>(
I)) {
2477 if (processLoad(Load))
2481 addToLeaderTable(Num, Load,
Load->getParent());
2488 if (!BI->isConditional())
2491 if (isa<Constant>(BI->getCondition()))
2492 return processFoldableCondBr(BI);
2494 Value *BranchCond = BI->getCondition();
2498 if (TrueSucc == FalseSucc)
2502 bool Changed =
false;
2506 Changed |= propagateEquality(BranchCond, TrueVal, TrueE,
true);
2510 Changed |= propagateEquality(BranchCond, FalseVal, FalseE,
true);
2517 Value *SwitchCond =
SI->getCondition();
2519 bool Changed =
false;
2523 for (
unsigned i = 0, n =
SI->getNumSuccessors(); i != n; ++i)
2524 ++SwitchEdges[
SI->getSuccessor(i)];
2530 if (SwitchEdges.
lookup(Dst) == 1) {
2532 Changed |= propagateEquality(SwitchCond, i->getCaseValue(),
E,
true);
2540 if (
I->getType()->isVoidTy())
2548 if (isa<AllocaInst>(
I) ||
I->isTerminator() || isa<PHINode>(
I)) {
2549 addToLeaderTable(Num,
I,
I->getParent());
2556 if (Num >= NextNum) {
2557 addToLeaderTable(Num,
I,
I->getParent());
2563 Value *Repl = findLeader(
I->getParent(), Num);
2566 addToLeaderTable(Num,
I,
I->getParent());
/// runOnFunction - This is the main transformation entry point for a
/// function.
bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC,
                      DominatorTree &RunDT, const TargetLibraryInfo &RunTLI,
                      AAResults &RunAA, MemoryDependenceResults *RunMD,
                      LoopInfo *LI, OptimizationRemarkEmitter *RunORE,
                      MemorySSA *MSSA) {
  AC = &RunAC;
  DT = &RunDT;
  VN.setDomTree(DT);
  TLI = &RunTLI;
  VN.setAliasAnalysis(&RunAA);
  MD = RunMD;
  ImplicitControlFlowTracking ImplicitCFT;
  ICF = &ImplicitCFT;
  this->LI = LI;
  VN.setMemDep(MD);
  ORE = RunORE;
  InvalidBlockRPONumbers = true;
  MemorySSAUpdater Updater(MSSA);
  MSSAU = MSSA ? &Updater : nullptr;

  bool Changed = false;
  bool ShouldContinue = true;

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  // Merge unconditional branches, allowing PRE to catch more optimization
  // opportunities.
  for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
    bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD);
    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    (void)Iteration;
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (isPREEnabled()) {
    // Fabricate val-nums for dead code to suppress asserts in performPRE.
    assignValNumForDeadCode();
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  cleanupGlobalSets();
  // DeadBlocks is cleared here rather than in cleanupGlobalSets(), which
  // runs on every iteration.
  DeadBlocks.clear();

  if (MSSA && VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  return Changed;
}
2658 "We expect InstrsToErase to be empty across iterations");
2659 if (DeadBlocks.count(BB))
2663 ReplaceOperandsWithMap.clear();
2664 bool ChangedFunction =
false;
2674 if (!ReplaceOperandsWithMap.empty())
2675 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2676 ChangedFunction |= processInstruction(&*BI);
2678 if (InstrsToErase.
empty()) {
2684 NumGVNInstr += InstrsToErase.
size();
2687 bool AtStart = BI == BB->
begin();
2691 for (
auto *
I : InstrsToErase) {
2692 assert(
I->getParent() == BB &&
"Removing instruction from wrong block?");
2701 I->eraseFromParent();
2703 InstrsToErase.clear();
2711 return ChangedFunction;
2724 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2736 if (
Value *V = findLeader(Pred, TValNo)) {
2760 addToLeaderTable(Num, Instr, Pred);
bool GVNPass::performScalarPRE(Instruction *CurInst) {
  if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
      isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
      CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
      isa<DbgInfoIntrinsic>(CurInst))
    return false;

  // Don't do PRE on compares: the PHI would prevent CodeGenPrepare from
  // sinking the compare again and would force the i1 out of processor flags.
  if (isa<CmpInst>(CurInst))
    return false;

  // Don't do PRE on GEPs: the inserted PHI would prevent CodeGenPrepare from
  // sinking the addressing computation back to its uses, increasing register
  // pressure.  This doesn't prevent load PRE; PHI translation will make the
  // GEP available in each predecessor.
  if (isa<GetElementPtrInst>(CurInst))
    return false;

  if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
    // We don't currently value number ANY inline asm calls.
    if (CallB->isInlineAsm())
      return false;
  }

  uint32_t ValNo = VN.lookup(CurInst);

  // Look for PRE opportunities in the predecessors.  We only solve the basic
  // diamond case, where a value is computed in the successor and exactly one
  // predecessor, and we disallow a successor that is its own predecessor.
  unsigned NumWith = 0;
  unsigned NumWithout = 0;
  BasicBlock *PREPred = nullptr;
  BasicBlock *CurrentBlock = CurInst->getParent();

  // Update the RPO numbers for this function.
  if (InvalidBlockRPONumbers)
    assignBlockRPONumber(*CurrentBlock->getParent());

  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
  for (BasicBlock *P : predecessors(CurrentBlock)) {
    // We're not interested in PRE where blocks are not reachable.
    if (!DT->isReachableFromEntry(P)) {
      NumWithout = 2;
      break;
    }
    // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
           "Invalid BlockRPONumber map.");
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
      NumWithout = 2;
      break;
    }

    uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
    Value *predV = findLeader(P, TValNo);
    if (!predV) {
      predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
      PREPred = P;
      ++NumWithout;
    } else if (predV == CurInst) {
      // CurInst dominates this predecessor.
      NumWithout = 2;
      break;
    } else {
      predMap.push_back(std::make_pair(predV, P));
      ++NumWith;
    }
  }

  // Don't do PRE when it might increase code size, i.e. when we would need
  // to insert instructions in more than one predecessor.
  if (NumWithout > 1 || NumWith == 0)
    return false;

  // We may have a case where all predecessors have the instruction and we
  // just need to insert a phi node; otherwise, perform insertion.
  Instruction *PREInstr = nullptr;

  if (NumWithout != 0) {
    if (!isSafeToSpeculativelyExecute(CurInst)) {
      // Inserting a new instruction is only valid if the current instruction
      // is always executed; implicit control flow above it prohibits PRE.
      if (ICF->isDominatedByICFIFromSameBlock(CurInst))
        return false;
    }

    // Don't do PRE across an indirect branch.
    if (isa<IndirectBrInst>(PREPred->getTerminator()))
      return false;

    // We can't do PRE safely on a critical edge, so instead schedule the edge
    // to be split and perform the PRE the next time we iterate.
    unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
    if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
      toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
      return false;
    }
    // We need to insert somewhere, so let's give it a shot.
    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
      // If insertion failed, make sure we remove the instruction.
      verifyRemoved(PREInstr);
      PREInstr->deleteValue();
      return false;
    }
  }

  // Either we should have filled in the PRE instruction, or we should not
  // have needed insertions.
  assert(PREInstr != nullptr || NumWithout == 0);

  ++NumGVNPRE;

  // Create a PHI to make the value available in this block.
  PHINode *Phi = PHINode::Create(CurInst->getType(), predMap.size(),
                                 CurInst->getName() + ".pre-phi",
                                 &CurrentBlock->front());
  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
    if (Value *V = predMap[i].first) {
      // If we use an existing value in this phi, we have to patch the
      // original value, because the phi will replace a later value.
      patchReplacementInstruction(CurInst, V);
      Phi->addIncoming(V, predMap[i].second);
    } else
      Phi->addIncoming(PREInstr, PREPred);
  }

  VN.add(Phi, ValNo);
  // The new PHI changes the phi translate result for ValNo, so erase the
  // related stale entries in the phi translate cache.
  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
  addToLeaderTable(ValNo, Phi, CurrentBlock);
  Phi->setDebugLoc(CurInst->getDebugLoc());
  CurInst->replaceAllUsesWith(Phi);
  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Phi);
  VN.erase(CurInst);
  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

  LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
  if (MD)
    MD->removeInstruction(CurInst);
  if (MSSAU)
    MSSAU->removeMemoryAccess(CurInst);
  LLVM_DEBUG(verifyRemoved(CurInst));
  ICF->removeInstruction(CurInst);
  CurInst->eraseFromParent();
  ++NumGVNInstr;

  return true;
}
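// Illustrative diamond for scalar PRE (hand-written): if `%t = add i32 %a, %b`
// is available in one predecessor and recomputed in the merge block but
// absent from the other predecessor, a clone `%t.pre` is inserted into the
// missing predecessor and the merge block computation is replaced by
// `%t.pre-phi = phi i32 [ %t, %pred1 ], [ %t.pre, %pred2 ]`.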
bool GVNPass::performPRE(Function &F) {
  bool Changed = false;
  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock())
      continue;

    // Don't perform PRE on an EH pad.
    if (CurrentBlock->isEHPad())
      continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
                              BE = CurrentBlock->end();
         BI != BE;) {
      Instruction *CurInst = &*BI++;
      Changed |= performScalarPRE(CurInst);
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// Split the critical edge connecting the given two blocks, and return the
/// block inserted on the edge.
BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  // GVN does not require loop-simplify; do not try to preserve it if that is
  // not possible.
  BasicBlock *BB = SplitCriticalEdge(
      Pred, Succ,
      CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
  if (BB) {
    if (MD)
      MD->invalidateCachedPredecessors();
    InvalidBlockRPONumbers = true;
  }
  return BB;
}
/// Split critical edges found during the previous iteration that we avoided
/// splitting because it was in the middle of an iteration.
bool GVNPass::splitCriticalEdges() {
  if (toSplit.empty())
    return false;

  bool Changed = false;
  do {
    std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
    Changed |= SplitCriticalEdge(Edge.first, Edge.second,
                                 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
               nullptr;
  } while (!toSplit.empty());
  if (Changed) {
    if (MD)
      MD->invalidateCachedPredecessors();
    InvalidBlockRPONumbers = true;
  }
  return Changed;
}
/// Executes one iteration of GVN.
bool GVNPass::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.  RPOT walks the graph in its
  // constructor and will not be invalidated during processBlock.
  bool Changed = false;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    Changed |= processBlock(BB);

  return Changed;
}
void GVNPass::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  BlockRPONumber.clear();
  TableAllocator.Reset();
  ICF->clear();
  InvalidBlockRPONumbers = true;
}
/// Verify that the specified instruction does not occur in our internal data
/// structures.
void GVNPass::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (const auto &I : LeaderTable) {
    const LeaderTableEntry *Node = &I.second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}
/// BB is declared dead, which implies other blocks become dead as well.  Add
/// all such blocks to DeadBlocks, and for the dead blocks' live successors,
/// update their phi nodes by replacing the operands corresponding to dead
/// blocks with a poison value.
void GVNPass::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance-frontier(D).
    for (BasicBlock *B : Dom) {
      for (BasicBlock *S : successors(B)) {
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (BasicBlock *P : predecessors(S))
          if (!DeadBlocks.count(P)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could be proved dead later on; that is why phi operands are
          // not updated at this moment.
          DF.insert(S);
        } else {
          // While S is not dominated by D, it is dead by now.  This could
          // happen if S already had a dead predecessor before D was declared
          // dead.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes.
  for (BasicBlock *B : DF) {
    if (DeadBlocks.count(B))
      continue;

    // First, split the critical edges.  This might also create additional
    // blocks to preserve LoopSimplify and LCSSA form.
    SmallVector<BasicBlock *, 4> Preds(predecessors(B));
    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))
        continue;

      if (llvm::is_contained(successors(P), B) &&
          isCriticalEdge(P->getTerminator(), B)) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }
    }

    for (BasicBlock *P : predecessors(B)) {
      if (!DeadBlocks.count(P))
        continue;
      for (PHINode &Phi : B->phis()) {
        Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
        if (MD)
          MD->invalidateCachedPointerInfo(&Phi);
      }
    }
  }
}
// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), it will make the following changes:
//  1) Fold the branch to an unconditional branch (handled by other passes).
//  2) Mark the block targeted by the dead condition as dead.
//  3) Make the dominated live blocks of the dead block aware of this change.
bool GVNPass::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot =
      Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger an assert if it comes across an instruction
// without an associated val-num.  Since live instructions vastly outnumber
// dead ones, it is cheaper to fabricate a val-num for the dead code than to
// check whether each instruction is dead.
void GVNPass::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      addToLeaderTable(ValNum, &Inst, BB);
    }
  }
}
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    return Impl.runImpl(
        F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
        getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
        getAnalysis<AAResultsWrapperPass>().getAAResults(),
        Impl.isMemDepEnabled()
            ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
            : nullptr,
        LIWP ? &LIWP->getLoopInfo() : nullptr,
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
        MSSAWP ? &MSSAWP->getMSSA() : nullptr);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (Impl.isMemDepEnabled())
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file contains the simple types necessary to represent the attributes associated with functions a...
SmallVector< MachineOperand, 4 > Cond
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool hasUsersIn(Value *V, BasicBlock *BB)
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &gvn)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool impliesEquivalanceIfFalse(CmpInst *Cmp)
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static bool impliesEquivalanceIfTrue(CmpInst *Cmp)
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
Module.h This file contains the declarations for the Module class.
ppc ctr loops PowerPC CTR Loops Verify
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This defines the Use class.
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
This represents the llvm.assume intrinsic.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
const BasicBlock * getEnd() const
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
Conditional or Unconditional Branch instruction.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
void Reset()
Deallocate all but the current slab and reset the current pointer to the beginning of it,...
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Analysis pass which computes a DominatorTree.
void getDescendants(NodeT *R, SmallVectorImpl< NodeT * > &Result) const
Get all nodes dominated by R, including R itself.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Returns the value number of the given comparison, assigning it a new number if it did not have one be...
uint32_t getNextUnusedValueNumber()
uint32_t lookupOrAdd(Value *V)
lookup_or_add - Returns the value number for the specified value, assigning it a new number if it did...
uint32_t lookup(Value *V, bool Verify=true) const
Returns the value number of the specified value.
void setAliasAnalysis(AAResults *A)
void clear()
Remove all entries from the ValueTable.
bool exists(Value *V) const
Returns true if a value number exists for the specified value.
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock, uint32_t Num, GVNPass &Gvn)
Wrap phiTranslateImpl to provide caching functionality.
void setMemDep(MemoryDependenceResults *M)
void erase(Value *v)
Remove a value from the value numbering.
void add(Value *V, uint32_t num)
add - Insert a value into the table with a specified value number.
void setDomTree(DominatorTree *D)
void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock)
Erase stale entry from phiTranslate cache so phiTranslate can be computed again.
void verifyRemoved(const Value *) const
verifyRemoved - Verify that the value is removed from all internal data structures.
The core GVN pass object.
friend class gvn::GVNLegacyPass
bool isPREEnabled() const
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
AAResults * getAliasAnalysis() const
bool isLoadPREEnabled() const
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
DominatorTree & getDominatorTree() const
bool isLoadInLoopPREEnabled() const
bool isLoadPRESplitBackedgeEnabled() const
void markInstructionForDeletion(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
bool isMemDepEnabled() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Legacy wrapper pass to provide the GlobalsAAResult object.
This class allows to keep track on instructions with implicit control flow.
bool isDominatedByICFIFromSameBlock(const Instruction *Insn)
Returns true if the first ICFI of Insn's block exists and dominates Insn.
bool hasICF(const BasicBlock *BB)
Returns true if at least one instruction from the given basic block has implicit control flow.
void clear()
Invalidates all information from this tracking.
void removeUsersOf(const Instruction *Inst)
Notifies this tracking that we are going to replace all uses of Inst.
void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB)
Notifies this tracking that we are going to insert a new instruction Inst to the basic block BB.
void removeInstruction(const Instruction *Inst)
Notifies this tracking that we are going to remove the instruction Inst It makes all necessary update...
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
const BasicBlock * getParent() const
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs)
Drop all unknown metadata except for debug locations.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
This class implements a map that also provides access to all stored values in a deterministic order.
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
An analysis that produces MemoryDependenceResults for a function.
Provides a lazy, caching interface for making common memory aliasing information queries,...
std::vector< NonLocalDepEntry > NonLocalDepInfo
void invalidateCachedPredecessors()
Clears the PredIteratorCache info.
void invalidateCachedPointerInfo(Value *Ptr)
Invalidates cached information about the specified pointer, because it may be too conservative in mem...
std::optional< int32_t > getClobberOffset(LoadInst *DepInst) const
Return the clobber offset to dependent instruction.
void removeInstruction(Instruction *InstToRemove)
Removes an instruction from the dependence analysis, updating the dependence of instructions that pre...
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
void getNonLocalPointerDependency(Instruction *QueryInst, SmallVectorImpl< NonLocalDepResult > &Result)
Perform a full dependency query for an access to the QueryInst's specified memory location,...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
const Value * Ptr
The address of the start of the location.
An analysis that produces MemorySSA for a function.
MemorySSA * getMemorySSA() const
Get handle on MemorySSA.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before or after an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point)
Create a MemoryAccess in MemorySSA at a specified point in a block, with a specified clobbering defin...
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Legacy analysis pass which computes MemorySSA.
Encapsulates MemorySSA, including all data associated with memory accesses.
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
MemoryAccess * getLiveOnEntryDef() const
Class that has the common methods + fields of memory uses/defs.
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
This is an entry in the NonLocalDepInfo cache.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
void setIncomingValueForBlock(const BasicBlock *BB, Value *V)
Set every incoming value(s) for block BB to V.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PHITransAddr - An address value which tracks and handles phi translation.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
char front() const
front - Get the first character in the string.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isOpaquePointerTy() const
True if this is an instance of an opaque PointerType.
bool isVoidTy() const
Return true if this is 'void'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
bool canBeFreed() const
Return true if the memory object referred to by V can by freed in the scope for which the SSA value d...
void deleteValue()
Delete a pointer to a generic Value.
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
An efficient, type-erasing, non-owning reference to a callable.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
GVNLegacyPass(bool NoMemDepAnalysis=!GVNEnableMemDep)
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
std::unique_ptr< ValueIDNum[]> ValueTable
Type for a table of values in a block.
@ C
The default llvm calling convention, compatible with C.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
bool match(Val *V, const Pattern &P)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at DepSI.
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform the extraction.
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at DepLI.
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory intrinsic at DepMI.
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
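The analyze/materialize functions above are meant to be paired; a sketch under the assumption that DepSI is the clobbering store found by dependence analysis:

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
using namespace llvm;
using namespace llvm::VNCoercion;

// If DepSI's store covers the load, materialize the loaded bits from the
// stored value instead of re-loading from memory.
Value *tryForwardStore(LoadInst *Load, StoreInst *DepSI, const DataLayout &DL) {
  int Offset = analyzeLoadFromClobberingStore(
      Load->getType(), Load->getPointerOperand(), DepSI, DL);
  if (Offset < 0)
    return nullptr; // the store does not cover the loaded bytes
  return getValueForLoad(DepSI->getValueOperand(), unsigned(Offset),
                         Load->getType(), Load, DL);
}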
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
Interval::succ_iterator succ_end(Interval *I)
hash_code hash_value(const FixedPointSemantics &Val)
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value, return said value in the requested type.
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instruction's list of successors.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
auto successors(const MachineBasicBlock *BB)
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
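make_early_inc_range is what makes erase-while-iterating loops safe; an illustrative sketch that also uses isInstructionTriviallyDead from further down this list:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// The early-inc iterator advances before handing out the element, so the
// current instruction can be erased without invalidating the loop.
void dropDeadInstructions(BasicBlock &BB) {
  for (Instruction &I : make_early_inc_range(BB))
    if (isInstructionTriviallyDead(&I))
      I.eraseFromParent();
}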
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
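A minimal usage sketch; the caller is assumed to erase I afterwards if it became dead:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// If I folds to an existing value, forward its uses to that value.
bool foldAway(Instruction *I, const SimplifyQuery &Q) {
  Value *V = simplifyInstruction(I, Q);
  if (!V)
    return false;
  I->replaceAllUsesWith(V);
  return true;
}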
void initializeGVNLegacyPassPass(PassRegistry &)
Interval::pred_iterator pred_end(Interval *I)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool isModSet(const ModRefInfo MRI)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
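A hedged sketch of edge-dominated replacement, in the spirit of GVN's equality propagation (the block parameters are hypothetical):

#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Rewrite only the uses of From that are reached strictly through the
// Src -> Dst edge; returns how many uses were rewritten.
unsigned propagateOnEdge(Value *From, Value *To, DominatorTree &DT,
                         BasicBlock *Src, BasicBlock *Dst) {
  BasicBlockEdge Edge(Src, Dst);
  return replaceDominatedUsesWith(From, To, DT, Edge);
}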
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
bool VerifyMemorySSA
Enables verification of MemorySSA.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
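These two are typically used together; a sketch, assuming the default splitting options are acceptable:

#include "llvm/Analysis/CFG.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

// Split TI's SuccNum edge only when it is critical (the source has multiple
// successors and the destination has multiple predecessors); returns the new
// block, or nullptr if nothing needed splitting.
BasicBlock *splitIfCritical(Instruction *TI, unsigned SuccNum) {
  if (!isCriticalEdge(TI, SuccNum))
    return nullptr;
  return SplitCriticalEdge(TI, SuccNum, CriticalEdgeSplittingOptions());
}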
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
constexpr unsigned BitWidth
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
bool isAssumeWithEmptyBundle(AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable information.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
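A sketch of the usual pairing of hash_combine with hash_combine_range, shaped like a value-numbering expression hash (the field names are hypothetical):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
using namespace llvm;

// Mix the scalar opcode with the hash of the whole operand sequence.
hash_code hashExpr(unsigned Opcode, ArrayRef<uint32_t> VarArgs) {
  return hash_combine(Opcode,
                      hash_combine_range(VarArgs.begin(), VarArgs.end()));
}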
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet, returning true if uncertain.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Option class for critical edge splitting.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static unsigned getHashValue(const GVNPass::Expression &e)
static GVNPass::Expression getEmptyKey()
An information struct used to provide DenseMap with the various necessary components for a given value type T.
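The four static hooks above are exactly what any DenseMapInfo specialization supplies; a generic sketch for a hypothetical key type (the sentinel values are illustrative):

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"

struct MyKey { unsigned A, B; };

namespace llvm {
template <> struct DenseMapInfo<MyKey> {
  // Two reserved keys DenseMap uses internally for empty and erased slots.
  static inline MyKey getEmptyKey() { return {~0U, 0}; }
  static inline MyKey getTombstoneKey() { return {~0U - 1, 0}; }
  static unsigned getHashValue(const MyKey &K) {
    return (unsigned)hash_combine(K.A, K.B);
  }
  static bool isEqual(const MyKey &L, const MyKey &R) {
    return L.A == R.A && L.B == R.B;
  }
};
} // namespace llvm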
A set of parameters to control various transforms performed by GVN pass.
std::optional< bool > AllowLoadPRESplitBackedge
std::optional< bool > AllowPRE
std::optional< bool > AllowLoadInLoopPRE
std::optional< bool > AllowMemDep
std::optional< bool > AllowLoadPRE
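These fields are normally populated through GVNOptions' chained setters; a sketch, assuming the setPRE/setLoadPRE/setMemDep setters declared in GVN.h:

#include "llvm/Transforms/Scalar/GVN.h"
using namespace llvm;

// Build a GVN pass with explicit knobs; fields left unset (std::nullopt)
// fall back to the pass's command-line defaults.
GVNPass makeCustomGVN() {
  GVNOptions Opts;
  Opts.setPRE(true).setLoadPRE(false).setMemDep(true);
  return GVNPass(Opts);
}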
SmallVector< uint32_t, 4 > varargs
bool operator==(const Expression &other) const
friend hash_code hash_value(const Expression &Value)
Expression(uint32_t o=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
Value * MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const
Emit code at the end of this block to adjust the value defined here to the specified type.
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, GVNPass &gvn) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const
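A sketch of how these factories pair up; AvailableValue and AvailableValueInBlock are internal to GVN.cpp, so this only illustrates the intended usage, assuming a store dependence has already been analyzed:

using namespace llvm;
using namespace llvm::gvn;

// Record that the value stored by DepSI is available at the end of BB for
// the load being processed, Offset bytes into the stored value.
static AvailableValueInBlock describeStoreDep(BasicBlock *BB, StoreInst *DepSI,
                                              unsigned Offset) {
  return AvailableValueInBlock::get(
      BB, AvailableValue::get(DepSI->getValueOperand(), Offset));
}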