#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
STATISTIC(NumPRELoadMoved2CEPred,
          "Number of loads moved to predecessor of a critical edge in PRE");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached gvn-max-block-speculations cut-off "
          "preventing further exploration");

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100),
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600),
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

static cl::opt<uint32_t> MaxNumVisitedInsts(
    "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
    cl::desc("Max number of visited instructions when trying to find "
             "dominating value of select dependency (default = 100)"));

static cl::opt<uint32_t> MaxNumInsnsPerBlock(
    "gvn-max-num-insns", cl::Hidden, cl::init(100),
    cl::desc("Max number of instructions to scan in each basic block in GVN "
             "(default = 100)"));
  if ((!Attrs.isEmpty() || !Other.Attrs.isEmpty()) &&
      !Attrs.intersectWith(Ty->getContext(), Other.Attrs).has_value())

  Res.AV = std::move(AV);

  return AV.MaterializeAdjustedValue(Load, BB->getTerminator());
  E.Opcode = I->getOpcode();

  E.VarArgs.push_back(lookupOrAdd(GCR->getOperand(0)));
  E.VarArgs.push_back(lookupOrAdd(GCR->getBasePtr()));
  E.VarArgs.push_back(lookupOrAdd(GCR->getDerivedPtr()));

  for (Use &Op : I->operands())
    E.VarArgs.push_back(lookupOrAdd(Op));

  if (I->isCommutative()) {
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (E.VarArgs[0] > E.VarArgs[1])
      std::swap(E.VarArgs[0], E.VarArgs[1]);
    E.Commutative = true;
  }

  if (E.VarArgs[0] > E.VarArgs[1]) {
    std::swap(E.VarArgs[0], E.VarArgs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  E.Commutative = true;

  E.VarArgs.append(IVI->idx_begin(), IVI->idx_end());

  ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
  E.VarArgs.append(ShuffleMask.begin(), ShuffleMask.end());

  E.Attrs = CB->getAttributes();
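// createCmpExpr (below) builds the Expression for a standalone comparison.
// As with the commutative case above, operand value numbers are sorted so
// that equivalent comparisons with swapped operands (x < y vs. y > x) map to
// the same expression; the predicate is folded into the low bits of the
// opcode.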
GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
    unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");

  E.VarArgs.push_back(lookupOrAdd(LHS));
  E.VarArgs.push_back(lookupOrAdd(RHS));

  if (E.VarArgs[0] > E.VarArgs[1]) {
    std::swap(E.VarArgs[0], E.VarArgs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  E.Opcode = (Opcode << 8) | Predicate;
  E.Commutative = true;
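// For an extractvalue of an overflow intrinsic, the expression is built from
// the intrinsic's own operands (getLHS/getRHS below) so it can number equal
// to the corresponding plain binary operation; otherwise all operands are
// added as-is.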
GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");

  E.VarArgs.push_back(lookupOrAdd(WO->getLHS()));
  E.VarArgs.push_back(lookupOrAdd(WO->getRHS()));

  E.VarArgs.push_back(lookupOrAdd(Op));
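// GEPs are canonicalized as a pointer plus a set of (variable, scale) offsets
// and a constant offset when collectOffset succeeds; otherwise the expression
// falls back to the raw opcode, source element type, and operand list.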
GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {

  Type *PtrTy = GEP->getType()->getScalarType();
  const DataLayout &DL = GEP->getDataLayout();
  unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
  SmallMapVector<Value *, APInt, 4> VariableOffsets;

  if (GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
    E.Opcode = GEP->getOpcode();
    E.VarArgs.push_back(lookupOrAdd(GEP->getPointerOperand()));
    for (const auto &[V, Scale] : VariableOffsets) {
      E.VarArgs.push_back(lookupOrAdd(V));
      E.VarArgs.push_back(lookupOrAdd(ConstantInt::get(Context, Scale)));
    }
    if (!ConstantOffset.isZero())
      E.VarArgs.push_back(
          lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
  } else {
    E.Opcode = GEP->getOpcode();
    E.Ty = GEP->getSourceElementType();
    for (Use &Op : GEP->operands())
      E.VarArgs.push_back(lookupOrAdd(Op));
  }
GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;

  ValueNumbering.insert(std::make_pair(V, Num));
  NumberingPhi[Num] = PN;

  assert(MSSA && "addMemoryStateToExp should not be called without MemorySSA");
  assert(MSSA->getMemoryAccess(I) && "Instruction does not access memory");
  MemoryAccess *MA = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(I);
  Exp.VarArgs.push_back(lookupOrAdd(MA));
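// In lookupOrAddCall below, calls in pre-split coroutines and convergent
// calls are given fresh value numbers rather than being unified with other
// calls.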
  if (C->getFunction()->isPresplitCoroutine()) {
    ValueNumbering[C] = NextValueNumber;
    return NextValueNumber++;
  }

  if (C->isConvergent()) {
    ValueNumbering[C] = NextValueNumber;
    return NextValueNumber++;
  }

  if (AA->doesNotAccessMemory(C)) {
    uint32_t E = assignExpNewValueNum(Exp).first;
    ValueNumbering[C] = E;

  if (MD && AA->onlyReadsMemory(C)) {
    auto [E, IsValNumNew] = assignExpNewValueNum(Exp);
    ValueNumbering[C] = E;

    MemDepResult LocalDep = MD->getDependency(C);

      ValueNumbering[C] = NextValueNumber;
      return NextValueNumber++;
    if (LocalDep.isDef()) {

      if (!LocalDepCall || LocalDepCall->arg_size() != C->arg_size()) {
        ValueNumbering[C] = NextValueNumber;
        return NextValueNumber++;
      }

      for (unsigned I = 0, E = C->arg_size(); I < E; ++I) {
        uint32_t CVN = lookupOrAdd(C->getArgOperand(I));
        uint32_t LocalDepCallVN = lookupOrAdd(LocalDepCall->getArgOperand(I));
        if (CVN != LocalDepCallVN) {
          ValueNumbering[C] = NextValueNumber;
          return NextValueNumber++;
        }
      }

      uint32_t V = lookupOrAdd(LocalDepCall);
      ValueNumbering[C] = V;
    const MemoryDependenceResults::NonLocalDepInfo &Deps =
        MD->getNonLocalCallDependency(C);

    CallInst *CDep = nullptr;

    for (const NonLocalDepEntry &I : Deps) {
      if (I.getResult().isNonLocal())
        continue;

      if (!I.getResult().isDef() || CDep != nullptr) {

      if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
        CDep = NonLocalDepCall;
        continue;
      }

      ValueNumbering[C] = NextValueNumber;
      return NextValueNumber++;
    }

    if (!CDep) {
      ValueNumbering[C] = NextValueNumber;
      return NextValueNumber++;
    }
    for (unsigned I = 0, E = C->arg_size(); I < E; ++I) {
      uint32_t CVN = lookupOrAdd(C->getArgOperand(I));
      uint32_t CDepVN = lookupOrAdd(CDep->getArgOperand(I));
      if (CVN != CDepVN) {
        ValueNumbering[C] = NextValueNumber;
        return NextValueNumber++;
      }
    }

    uint32_t V = lookupOrAdd(CDep);
    ValueNumbering[C] = V;
  if (MSSA && IsMSSAEnabled && AA->onlyReadsMemory(C)) {
    addMemoryStateToExp(C, Exp);
    auto [V, _] = assignExpNewValueNum(Exp);
    ValueNumbering[C] = V;

  ValueNumbering[C] = NextValueNumber;
  return NextValueNumber++;
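// computeLoadStoreVN value-numbers a load or store through MemorySSA: without
// MemorySSA-backed numbering the instruction simply gets a fresh number; with
// it, the clobbering memory state is folded into the expression (via
// addMemoryStateToExp) so equivalent accesses can match.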
uint32_t GVNPass::ValueTable::computeLoadStoreVN(Instruction *I) {
  if (!MSSA || !IsMSSAEnabled) {
    ValueNumbering[I] = NextValueNumber;
    return NextValueNumber++;
  }

  Exp.Ty = I->getType();
  Exp.Opcode = I->getOpcode();
  for (Use &Op : I->operands())
    Exp.VarArgs.push_back(lookupOrAdd(Op));
  addMemoryStateToExp(I, Exp);

  auto [V, _] = assignExpNewValueNum(Exp);
  ValueNumbering[I] = V;

bool GVNPass::ValueTable::exists(Value *V) const {
  return ValueNumbering.contains(V);
}

  if (VI != ValueNumbering.end())

  ValueNumbering[V] = NextValueNumber;

  return NextValueNumber++;
  switch (I->getOpcode()) {
  case Instruction::Call:

  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::PtrToAddr:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  case Instruction::Select:
  case Instruction::Freeze:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ShuffleVector:
  case Instruction::InsertValue:

  case Instruction::GetElementPtr:

  case Instruction::ExtractValue:

  case Instruction::PHI:
    ValueNumbering[V] = NextValueNumber;

    return NextValueNumber++;
  case Instruction::Load:
  case Instruction::Store:
    return computeLoadStoreVN(I);
  default:
    ValueNumbering[V] = NextValueNumber;
    return NextValueNumber++;
  }

  uint32_t E = assignExpNewValueNum(Exp).first;
  ValueNumbering[V] = E;
  assert(VI != ValueNumbering.end() && "Value not numbered?");

  return (VI != ValueNumbering.end()) ? VI->second : 0;

uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression Exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(Exp).first;
}
  ValueNumbering.clear();
  ExpressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();

  uint32_t Num = ValueNumbering.lookup(V);
  ValueNumbering.erase(V);
  NumberingPhi.erase(Num);
  NumberingBB.erase(Num);

void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  assert(!ValueNumbering.contains(V) &&
         "Inst still occurs in value numbering map!");
}

  LeaderListNode &Curr = NumToLeaders[N];
  if (!Curr.Entry.Val) {

  LeaderListNode *Node = TableAllocator.Allocate<LeaderListNode>();
  Node->Next = Curr.Next;

  LeaderListNode *Prev = nullptr;
  LeaderListNode *Curr = &NumToLeaders[N];

  while (Curr && (Curr->Entry.Val != I || Curr->Entry.BB != BB)) {

    Prev->Next = Curr->Next;

    Curr->Entry.Val = nullptr;
    Curr->Entry.BB = nullptr;

    LeaderListNode *Next = Curr->Next;
    Curr->Entry.Val = Next->Entry.Val;
    Curr->Entry.BB = Next->Entry.BB;
    Curr->Next = Next->Next;
void GVNPass::LeaderMap::verifyRemoved(const Value *V) const {
  for (const auto &I : NumToLeaders) {
    assert(I.second.Entry.Val != V && "Inst still in value numbering scope!");
    assert(
        std::none_of(leader_iterator(&I.second), leader_iterator(nullptr),
                     [=](const LeaderTableEntry &E) { return E.Val == V; }) &&
        "Inst still in value numbering scope!");
  }
}
  return Options.AllowLoadPRESplitBackedge.value_or(
      GVNEnableSplitBackedgeInLoadPRE);

         "On-demand computation of MemSSA implies that MemDep is disabled!");

  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
      OS, MapClassName2PassName);

  if (Options.AllowPRE != std::nullopt)
    OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != std::nullopt)
    OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != std::nullopt)
    OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != std::nullopt)
    OS << (*Options.AllowMemDep ? "" : "no-") << "memdep;";
  if (Options.AllowMemorySSA != std::nullopt)
    OS << (*Options.AllowMemorySSA ? "" : "no-") << "memoryssa";
  removeInstruction(I);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  for (const auto &[Num, Exp] : Map) {
    errs() << Num << "\n";
  std::optional<BasicBlock *> UnavailableBB;

  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

  while (!Worklist.empty()) {

    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =

      UnavailableBB = CurrBB;

      ++NumNewNewSpeculativelyAvailableBBs;
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;

        UnavailableBB = CurrBB;

      NewSpeculativelyAvailableBBs.insert(CurrBB);

#if LLVM_ENABLE_STATS
  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);

  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())

        State = FixpointState;

        "Found a speculatively available successor leftover?");
  if (UnavailableBB) {

    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);

    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Available);

         "Must have fixed all the new speculatively available blocks.");

  return !UnavailableBB;
    if (V.AV.Val == OldValue)
      V.AV.Val = NewValue;
    if (V.AV.isSelectValue()) {
      if (V.AV.V1 == OldValue)
        V.AV.V1 = NewValue;
      if (V.AV.V2 == OldValue)
        V.AV.V2 = NewValue;
    }

  if (ValuesPerBlock.size() == 1 &&
      GVN.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load);
  }

  SSAUpdate.Initialize(Load->getType(), Load->getName());

    if (AV.AV.isUndefValue())

    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
  Type *LoadTy = Load->getType();

  if (Res->getType() != LoadTy) {

                          Load->getFunction());

    if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
      CoercedLoad->dropUnknownNonDebugMetadata(
          {LLVMContext::MD_dereferenceable,
           LLVMContext::MD_dereferenceable_or_null,
           LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});

    assert(V1 && V2 && "both value operands of the select must be present");

  assert(Res && "failed to materialize?");

  return II->getIntrinsicID() == Intrinsic::lifetime_start;
  Value *PtrOp = Load->getPointerOperand();

  for (auto *U : PtrOp->users()) {

    if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {

  for (auto *U : PtrOp->users()) {

    if (I->getFunction() == Load->getFunction() &&

      OtherAccess = nullptr;

  using namespace ore;

  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"

    R << " in favor of " << NV("OtherAccess", OtherAccess);

    R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  for (auto *Inst = BB == FromBB ? From : BB->getTerminator();

    if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
std::optional<AvailableValue>
GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                 Value *Address) {
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = Load->getDataLayout();

    if (Address && Load->isAtomic() <= DepSI->isAtomic()) {

    if (DepLoad != Load && Address &&
        Load->isAtomic() <= DepLoad->isAtomic()) {

                                           DepLoad->getFunction())) {
        const auto ClobberOff = MD->getClobberOffset(DepLoad);

        Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)

             dbgs() << " is clobbered by " << *DepInst << '\n';);
    return std::nullopt;

    if (Constant *InitVal =

    return std::nullopt;

    if (S->isAtomic() < Load->isAtomic())
      return std::nullopt;

      return std::nullopt;

    if (LD->isAtomic() < Load->isAtomic())
      return std::nullopt;

  assert(Sel->getType() == Load->getPointerOperandType());

      return std::nullopt;

      return std::nullopt;

             dbgs() << " has unknown def " << *DepInst << '\n';);
  return std::nullopt;
void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {

  for (const auto &Dep : Deps) {
    BasicBlock *DepBB = Dep.getBB();
    MemDepResult DepInfo = Dep.getResult();

    if (DeadBlocks.count(DepBB)) {

      UnavailableBlocks.push_back(DepBB);

    if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
      ValuesPerBlock.push_back(

      UnavailableBlocks.push_back(DepBB);

  assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
                                           LoadInst *Load) {

  if (Term->getNumSuccessors() != 2 || Term->isSpecialTerminator())

  auto *SuccBB = Term->getSuccessor(0);
  if (SuccBB == LoadBB)
    SuccBB = Term->getSuccessor(1);
  if (!SuccBB->getSinglePredecessor())

  for (Instruction &Inst : *SuccBB) {
    if (Inst.isDebugOrPseudoInst())

    if (--NumInsts == 0)

    if (!Inst.isIdenticalTo(Load))

    MemDepResult Dep = MD->getDependency(&Inst);

    if (Dep.isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads,
    MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad = new LoadInst(
        Load->getType(), LoadPtr, Load->getName() + ".pre", Load->isVolatile(),
        Load->getAlign(), Load->getOrdering(), Load->getSyncScopeID(),

    NewLoad->setDebugLoc(Load->getDebugLoc());

      auto *NewAccess = MSSAU->createMemoryAccessInBB(

        MSSAU->insertDef(NewDef, true);

    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *NoFPClassMD = Load->getMetadata(LLVMContext::MD_nofpclass))
      NewLoad->setMetadata(LLVMContext::MD_nofpclass, NoFPClassMD);

    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    ValuesPerBlock.push_back(

      MD->invalidateCachedPointerInfo(LoadPtr);

    if (CriticalEdgePredAndLoad) {
      auto It = CriticalEdgePredAndLoad->find(UnavailableBlock);
      if (It != CriticalEdgePredAndLoad->end()) {
        ++NumPRELoadMoved2CEPred;
        ICF->insertInstructionTo(NewLoad, UnavailableBlock);
        LoadInst *OldLoad = It->second;

        if (uint32_t ValNo = VN.lookup(OldLoad, false))
          LeaderTable.erase(ValNo, OldLoad, OldLoad->getParent());
        removeInstruction(OldLoad);

  ICF->removeUsersOf(Load);
  Load->replaceAllUsesWith(V);

    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);

    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks) {

  SmallPtrSet<BasicBlock *, 4> Blockers(llvm::from_range, UnavailableBlocks);

  bool MustEnsureSafetyOfSpeculativeExecution =
      ICF->isDominatedByICFIFromSameBlock(Load);

    if (TmpBB == LoadBB)

    if (Blockers.count(TmpBB))

    MustEnsureSafetyOfSpeculativeExecution =
        MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);

  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)

  for (BasicBlock *UnavailableBB : UnavailableBlocks)

  MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;

          dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
                 << Pred->getName() << "': " << *Load << '\n');

          dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                 << Pred->getName() << "': " << *Load << '\n');

          dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
                 << Pred->getName() << "': " << *Load << '\n');

      if (DT->dominates(LoadBB, Pred)) {
        LLVM_DEBUG(
            dbgs()
            << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
            << Pred->getName() << "': " << *Load << '\n');

      if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
        CriticalEdgePredAndLoad[Pred] = LI;

      PredLoads[Pred] = nullptr;

  unsigned NumInsertPreds = PredLoads.size() + CriticalEdgePredSplit.size();
  unsigned NumUnavailablePreds = NumInsertPreds +
                                 CriticalEdgePredAndLoad.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");
  (void)NumUnavailablePreds;

  if (NumInsertPreds > 1)

  if (MustEnsureSafetyOfSpeculativeExecution) {
    if (CriticalEdgePredSplit.size())

    for (auto &PL : PredLoads)

    for (auto &CEP : CriticalEdgePredAndLoad)

  for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }

  for (auto &CEP : CriticalEdgePredAndLoad)
    PredLoads[CEP.first] = nullptr;

  bool CanDoPRE = true;
  const DataLayout &DL = Load->getDataLayout();
  SmallVector<Instruction *, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    Value *LoadPtr = Load->getPointerOperand();

    while (Cur != LoadBB) {

      LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,

                        << *Load->getPointerOperand() << "\n");

    PredLoad.second = LoadPtr;

    while (!NewInsts.empty()) {

    return !CriticalEdgePredSplit.empty();

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
             << " INSTS: " << *NewInsts.back()

  for (Instruction *I : NewInsts) {

    I->updateLocationAfterHoist();

  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
                                  &CriticalEdgePredAndLoad);
bool GVNPass::performLoopLoadPRE(LoadInst *Load,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks) {
  const Loop *L = LI->getLoopFor(Load->getParent());

  if (!L || L->getHeader() != Load->getParent())

  if (!Preheader || !Latch)

  Value *LoadPtr = Load->getPointerOperand();

  if (!L->isLoopInvariant(LoadPtr))

  if (ICF->isDominatedByICFIFromSameBlock(Load))

  for (auto *Blocker : UnavailableBlocks) {

    if (!L->contains(Blocker))

    if (L != LI->getLoopFor(Blocker))

    if (DT->dominates(Blocker, Latch))

    if (Blocker->getTerminator()->mayWriteToMemory())

    LoopBlock = Blocker;

  MapVector<BasicBlock *, Value *> AvailableLoads;
  AvailableLoads[LoopBlock] = LoadPtr;
  AvailableLoads[Preheader] = LoadPtr;

  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
  eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,

  using namespace ore;

      << "load of type " << NV("Type", Load->getType()) << " eliminated"
      << setExtraArgs() << " in favor of "
bool GVNPass::processNonLocalLoad(LoadInst *Load) {

  if (Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      Load->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))

  MD->getNonLocalPointerDependency(Load, Deps);

  unsigned NumDeps = Deps.size();

      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
               dbgs() << " has unknown dependencies\n";);

  if (GetElementPtrInst *GEP =

    for (Use &U : GEP->indices())

  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);

  if (ValuesPerBlock.empty())

  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');

    ICF->removeUsersOf(Load);
    Load->replaceAllUsesWith(V);

    if (Load->getDebugLoc() && Load->getParent() == I->getParent())
      I->setDebugLoc(Load->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);

  if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
      PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
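// processAssumeIntrinsic: an assume makes its condition known-true from that
// point on, so the condition is fed into the equality-propagation machinery
// below; a literally-false assume marks the rest of the block unreachable.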
bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {

  if (Cond->isZero()) {

      const MemoryUseOrDef *FirstNonDom = nullptr;

          MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());

        for (const auto &Acc : *AL) {

            if (!Current->getMemoryInst()->comesBefore(NewS)) {
              FirstNonDom = Current;

          FirstNonDom ? MSSAU->createMemoryAccessBefore(

                            const_cast<MemoryUseOrDef *>(FirstNonDom))
                      : MSSAU->createMemoryAccessInBB(

  return propagateEquality(V, True, IntrinsicI);

  I->replaceAllUsesWith(Repl);
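// processLoad attempts to eliminate a single load: ordered/atomic and
// token-typed loads are skipped, non-local dependencies are handed off to
// processNonLocalLoad, and a locally available value is materialized and
// RAUW'd in.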
bool GVNPass::processLoad(LoadInst *L) {

  if (!L->isUnordered())

  if (L->getType()->isTokenLikeTy())

  if (L->use_empty()) {

  MemDepResult Dep = MD->getDependency(L);

    return processNonLocalLoad(L);

  LLVM_DEBUG(
      dbgs() << "GVN: load "; L->printAsOperand(dbgs());
      dbgs() << " has unknown dependence\n";);

  auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());

  Value *AvailableValue = AV->MaterializeAdjustedValue(L, L);

  ICF->removeUsersOf(L);
  L->replaceAllUsesWith(AvailableValue);

    MSSAU->removeMemoryAccess(L);

    MD->invalidateCachedPointerInfo(AvailableValue);
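// processMaskedLoad: forwarding for a masked load. When the dependency is a
// masked store whose value type matches the load's, the stored value can be
// forwarded (the third operand is the passthrough for masked-off lanes) and
// the load's uses replaced with it.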
bool GVNPass::processMaskedLoad(IntrinsicInst *I) {

  MemDepResult Dep = MD->getDependency(I);

  Value *Passthrough = I->getOperand(2);

      StoreVal->getType() != I->getType())

  ICF->removeUsersOf(I);
  I->replaceAllUsesWith(OpToForward);
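// assignExpNewValueNum assigns a value number to an expression, creating a
// new number (and growing the ExprIdx side table geometrically) the first
// time the expression is seen. Returns the number plus whether it was newly
// created.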
std::pair<uint32_t, bool>
GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &E = ExpressionNumbering[Exp];
  bool CreateNewValNum = !E;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < NextValueNumber + 1)
      ExprIdx.resize(NextValueNumber * 2);
    E = NextValueNumber;
    ExprIdx[NextValueNumber++] = NextExprNumber++;
  }
  return {E, CreateNewValNum};
bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                         GVNPass &GVN) {

      GVN.LeaderTable.getLeaders(Num),

  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, GVN);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});

  auto Leaders = GVN.LeaderTable.getLeaders(Num);
  for (const auto &Entry : Leaders) {

    if (Call && Call->getParent() == PhiBlock)

  if (AA->doesNotAccessMemory(Call))

  if (!MD || !AA->onlyReadsMemory(Call))

    if (D.getResult().isNonFuncLocal())
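// phiTranslateImpl translates a value number across a PHI edge: PHI numbers
// map to the incoming value for the given predecessor, MemoryPhis are
// resolved through MemorySSA, and ordinary expressions are rebuilt with each
// operand number translated recursively.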
uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                               const BasicBlock *PhiBlock,
                                               uint32_t Num, GVNPass &GVN) {

  if (PHINode *PN = NumberingPhi[Num]) {
    if (PN->getParent() != PhiBlock)

    for (unsigned I = 0; I != PN->getNumIncomingValues(); ++I) {
      if (PN->getIncomingBlock(I) != Pred)

      if (uint32_t TransVal = lookup(PN->getIncomingValue(I), false))

  if (BasicBlock *BB = NumberingBB[Num]) {
    assert(MSSA && "NumberingBB is non-empty only when using MemorySSA");

    MemoryPhi *MPhi = MSSA->getMemoryAccess(BB);

        return lookupOrAdd(PredPhi->getBlock());
      if (MSSA->isLiveOnEntryDef(MA))

        "CFG/MemorySSA mismatch: predecessor not found among incoming blocks");

  if (!areAllValsInBB(Num, PhiBlock, GVN))

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)

  for (unsigned I = 0; I < Exp.VarArgs.size(); I++) {

    if ((I > 1 && Exp.Opcode == Instruction::InsertValue) ||
        (I > 0 && Exp.Opcode == Instruction::ExtractValue) ||
        (I > 1 && Exp.Opcode == Instruction::ShuffleVector))
      continue;
    Exp.VarArgs[I] = phiTranslate(Pred, PhiBlock, Exp.VarArgs[I], GVN);
  }

  if (Exp.Commutative) {
    assert(Exp.VarArgs.size() >= 2 && "Unsupported commutative instruction!");
    if (Exp.VarArgs[0] > Exp.VarArgs[1]) {
      std::swap(Exp.VarArgs[0], Exp.VarArgs[1]);
      uint32_t Opcode = Exp.Opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.Opcode = (Opcode << 8) |

  if (uint32_t NewNum = ExpressionNumbering[Exp]) {
    if (Exp.Opcode == Instruction::Call && NewNum != Num)
      return areCallValsEqual(Num, NewNum, Pred, PhiBlock, GVN) ? NewNum : Num;

void GVNPass::ValueTable::eraseTranslateCacheEntry(

  PhiTranslateTable.erase({Num, Pred});
  auto Leaders = LeaderTable.getLeaders(Num);
  if (Leaders.empty())

  Value *Val = nullptr;
  for (const auto &Entry : Leaders) {
    if (DT->dominates(Entry.BB, BB)) {

  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
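// Block RPO numbers let PRE compare predecessor and block positions (see
// performScalarPRE); they are recomputed lazily whenever
// InvalidBlockRPONumbers has been set.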
void GVNPass::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
}
bool GVNPass::propagateEquality(
    Value *LHS, Value *RHS,
    const std::variant<BasicBlockEdge, Instruction *> &Root) {

  if (const BasicBlockEdge *Edge = std::get_if<BasicBlockEdge>(&Root)) {

    for (const auto *Node : DT->getNode(I->getParent())->children())

  while (!Worklist.empty()) {
    std::pair<Value *, Value *> Item = Worklist.pop_back_val();
    LHS = Item.first;
    RHS = Item.second;

    const DataLayout &DL =

    uint32_t LVN = VN.lookupOrAdd(LHS);

      uint32_t RVN = VN.lookupOrAdd(RHS);

      for (const BasicBlock *BB : DominatedBlocks)
        LeaderTable.insert(LVN, RHS, BB);

    auto CanReplacePointersCallBack = [&DL](const Use &U, const Value *To) {

    unsigned NumReplacements;
    if (const BasicBlockEdge *Edge = std::get_if<BasicBlockEdge>(&Root))

          LHS, RHS, *DT, *Edge, CanReplacePointersCallBack);

          LHS, RHS, *DT, std::get<Instruction *>(Root),
          CanReplacePointersCallBack);

    if (NumReplacements > 0) {

      NumGVNEqProp += NumReplacements;

        MD->invalidateCachedPointerInfo(LHS);

    bool IsKnownFalse = !IsKnownTrue;

      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      if (Cmp->isEquivalence(IsKnownFalse))
        Worklist.push_back(std::make_pair(Op0, Op1));

      Constant *NotVal = ConstantInt::get(Cmp->getType(), IsKnownFalse);

      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);

      if (Num < NextNum) {
        for (const auto &Entry : LeaderTable.getLeaders(Num)) {

          if (const BasicBlockEdge *Edge = std::get_if<BasicBlockEdge>(&Root)) {
            if (!DT->dominates(Entry.BB, Edge->getStart()) &&
                !DT->dominates(Edge->getEnd(), Entry.BB))

            auto *InstBB = std::get<Instruction *>(Root)->getParent();
            if (!DT->dominates(Entry.BB, InstBB) &&
                !DT->dominates(InstBB, Entry.BB))

          unsigned NumReplacements;
          if (const BasicBlockEdge *Edge = std::get_if<BasicBlockEdge>(&Root))

                NotCmp, NotVal, *DT, std::get<Instruction *>(Root));
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;

            MD->invalidateCachedPointerInfo(NotCmp);

      for (const BasicBlock *BB : DominatedBlocks)
        LeaderTable.insert(Num, NotVal, BB);

      Worklist.emplace_back(A, ConstantInt::get(A->getType(), IsKnownTrue));

      Worklist.emplace_back(A, ConstantInt::get(A->getType(), !IsKnownTrue));
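// processInstruction drives the per-instruction GVN logic: simplification,
// load processing, equality propagation along branch/switch edges, and
// finally leader-table lookup to replace the instruction with an equivalent
// leader value.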
bool GVNPass::processInstruction(Instruction *I) {

  const DataLayout &DL = I->getDataLayout();

  if (!I->use_empty()) {

    ICF->removeUsersOf(I);
    I->replaceAllUsesWith(V);

    if (MD && V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);

    return processAssumeIntrinsic(Assume);

    if (processLoad(Load))

    unsigned Num = VN.lookupOrAdd(Load);
    LeaderTable.insert(Num, Load, Load->getParent());

    if (!BI->isConditional())

      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();

    if (TrueSucc == FalseSucc)

    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE);

    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE);

    Value *SwitchCond = SI->getCondition();

    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;

      ++SwitchEdges[Succ];

    for (const auto &Case : SI->cases()) {

      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, Case.getCaseValue(), E);

  if (I->getType()->isVoidTy())

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookupOrAdd(I);

    LeaderTable.insert(Num, I, I->getParent());

  if (Num >= NextNum) {
    LeaderTable.insert(Num, I, I->getParent());

  Value *Repl = findLeader(I->getParent(), Num);

    LeaderTable.insert(Num, I, I->getParent());

    MD->invalidateCachedPointerInfo(Repl);
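// runImpl iterates the main GVN loop (iterateOnFunction) to a fixpoint, then
// runs PRE until it stops making changes; assignValNumForDeadCode first gives
// instructions in dead blocks value numbers so PRE's queries stay valid.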
bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
                      const TargetLibraryInfo &RunTLI, AAResults &RunAA,
                      MemoryDependenceResults *RunMD, LoopInfo &LI,
                      OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {

  VN.setAliasAnalysis(&RunAA);

  ImplicitControlFlowTracking ImplicitCFT;

  VN.setMemorySSA(MSSA);

  InvalidBlockRPONumbers = true;
  MemorySSAUpdater Updater(MSSA);
  MSSAU = MSSA ? &Updater : nullptr;

  bool ShouldContinue = true;

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  unsigned Iteration = 0;
  while (ShouldContinue) {

    ShouldContinue = iterateOnFunction(F);

  assignValNumForDeadCode();
  bool PREChanged = true;
  while (PREChanged) {
    PREChanged = performPRE(F);

  cleanupGlobalSets();
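// processBlock skips dead blocks entirely; otherwise every instruction in the
// block is run through processInstruction, with fully redundant PHIs
// collected and removed along the way.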
bool GVNPass::processBlock(BasicBlock *BB) {
  if (DeadBlocks.count(BB))

  bool ChangedFunction = false;

  SmallPtrSet<PHINode *, 8> PHINodesToRemove;

  for (PHINode *PN : PHINodesToRemove) {
    removeInstruction(PN);
  }

    ChangedFunction |= processInstruction(&Inst);

  return ChangedFunction;

bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                        BasicBlock *Curr, unsigned int ValNo) {

  for (unsigned I = 0, E = Instr->getNumOperands(); I != E; ++I) {

    if (!VN.exists(Op)) {

    uint32_t TValNo =
        VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
    if (Value *V = findLeader(Pred, TValNo)) {

  ICF->insertInstructionTo(Instr, Pred);

  unsigned Num = VN.lookupOrAdd(Instr);

  LeaderTable.insert(Num, Instr, Pred);
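// performScalarPRE counts in how many predecessors the value is already
// available (NumWith) versus missing (NumWithout); insertion is only
// profitable with exactly one missing predecessor, into which a clone of the
// instruction is hoisted, with the results joined by a new ".pre-phi" PHI.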
bool GVNPass::performScalarPRE(Instruction *CurInst) {

    if (CallB->isInlineAsm())

  uint32_t ValNo = VN.lookup(CurInst);

  unsigned NumWith = 0;
  unsigned NumWithout = 0;

  if (InvalidBlockRPONumbers)
    assignBlockRPONumber(*CurrentBlock->getParent());

    if (!DT->isReachableFromEntry(P)) {

    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
           "Invalid BlockRPONumber map.");
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {

    uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
    Value *PredV = findLeader(P, TValNo);

    } else if (PredV == CurInst) {

  if (NumWithout > 1 || NumWith == 0)

  if (NumWithout != 0) {

    if (ICF->isDominatedByICFIFromSameBlock(CurInst))

      ToSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));

    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {

      verifyRemoved(PREInstr);

  assert(PREInstr != nullptr || NumWithout == 0);

                              CurInst->getName() + ".pre-phi");
  Phi->insertBefore(CurrentBlock->begin());
  for (auto &[V, BB] : PredMap) {

    Phi->addIncoming(V, BB);

  Phi->addIncoming(PREInstr, PREPred);

  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
  LeaderTable.insert(ValNo, Phi, CurrentBlock);

  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Phi);
  LeaderTable.erase(ValNo, CurInst, CurrentBlock);

  removeInstruction(CurInst);

bool GVNPass::performPRE(Function &F) {

  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {

    if (CurrentBlock == &F.getEntryBlock())

    if (CurrentBlock->isEHPad())

         BE = CurrentBlock->end();

      Changed |= performScalarPRE(CurInst);

  if (splitCriticalEdges())
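// Splitting critical edges invalidates cached MemDep predecessor lists and
// the block RPO numbering, so both are reset after any split below.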
BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {

      CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());

    MD->invalidateCachedPredecessors();
  InvalidBlockRPONumbers = true;

bool GVNPass::splitCriticalEdges() {
  if (ToSplit.empty())

    std::pair<Instruction *, unsigned> Edge = ToSplit.pop_back_val();

               CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=

  } while (!ToSplit.empty());

    MD->invalidateCachedPredecessors();
  InvalidBlockRPONumbers = true;

bool GVNPass::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  ReversePostOrderTraversal<Function *> RPOT(&F);

  for (BasicBlock *BB : RPOT)

void GVNPass::cleanupGlobalSets() {

  LeaderTable.clear();
  BlockRPONumber.clear();

  InvalidBlockRPONumbers = true;
}

void GVNPass::removeInstruction(Instruction *I) {
  if (MD)
    MD->removeInstruction(I);

    MSSAU->removeMemoryAccess(I);

  ICF->removeInstruction(I);
  I->eraseFromParent();
}

void GVNPass::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);
  LeaderTable.verifyRemoved(Inst);
}
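// addDeadBlock marks a root dead block and everything it dominates as dead,
// then patches up the dominance frontier: critical edges from live
// predecessors into dead regions are split, and PHIs in frontier blocks are
// updated.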
void GVNPass::addDeadBlock(BasicBlock *BB) {
  SmallSetVector<BasicBlock *, 4> DF;

  while (!NewDead.empty()) {

    if (DeadBlocks.count(D))

    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert_range(Dom);

    for (BasicBlock *B : Dom) {

      if (DeadBlocks.count(S))

      bool AllPredDead = true;

        if (!DeadBlocks.count(P)) {
          AllPredDead = false;

  for (BasicBlock *B : DF) {
    if (DeadBlocks.count(B))

    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))

      if (BasicBlock *S = splitCriticalEdges(P, B))
        DeadBlocks.insert(P = S);

      if (!DeadBlocks.count(P))

      for (PHINode &Phi : B->phis()) {

          MD->invalidateCachedPointerInfo(&Phi);

bool GVNPass::processFoldableCondBr(BranchInst *BI) {

  if (DeadBlocks.count(DeadRoot))

    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
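// Instructions in dead blocks still get value numbers and leader-table
// entries so that later queries against them remain well-defined.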
void GVNPass::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      LeaderTable.insert(ValNum, &Inst, BB);

          .setMemDep(MemDepAnalysis)
          .setMemorySSA(MemSSAAnalysis)) {

    if (Impl.isMemorySSAEnabled() && !MSSAWP)

    return Impl.runImpl(

        Impl.isMemDepEnabled()

        MSSAWP ? &MSSAWP->getMSSA() : nullptr);

  if (Impl.isMemDepEnabled())

  if (Impl.isMemorySSAEnabled())
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
early cse Early CSE w MemorySSA
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, const DominatorTree *DT, OptimizationRemarkEmitter *ORE)
Try to locate the three instruction involved in a missed load-elimination case that is due to an inte...
static void reportLoadElim(LoadInst *Load, Value *AvailableValue, OptimizationRemarkEmitter *ORE)
static cl::opt< uint32_t > MaxNumInsnsPerBlock("gvn-max-num-insns", cl::Hidden, cl::init(100), cl::desc("Max number of instructions to scan in each basic block in GVN " "(default = 100)"))
static cl::opt< bool > GVNEnableMemDep("enable-gvn-memdep", cl::init(true))
static cl::opt< bool > GVNEnableLoadInLoopPRE("enable-load-in-loop-pre", cl::init(true))
static const Instruction * findMayClobberedPtrAccess(LoadInst *Load, const DominatorTree *DT)
static cl::opt< uint32_t > MaxNumDeps("gvn-max-num-deps", cl::Hidden, cl::init(100), cl::desc("Max number of dependences to attempt Load PRE (default = 100)"))
static cl::opt< bool > GVNEnableMemorySSA("enable-gvn-memoryssa", cl::init(false))
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT)
There is an edge from 'Src' to 'Dst'.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap< BasicBlock *, AvailabilityState > &FullyAvailableBlocks)
Return true if we can prove that the value we're analyzing is fully available in the specified block.
static Value * findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, Instruction *From, AAResults *AA)
static bool liesBetween(const Instruction *From, Instruction *Between, const Instruction *To, const DominatorTree *DT)
Assuming To can be reached from both From and Between, does Between lie on every path from From to To...
static bool isLifetimeStart(const Instruction *Inst)
static cl::opt< bool > GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre", cl::init(false))
static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl)
static void replaceValuesPerBlockEntry(SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, Value *OldValue, Value *NewValue)
If the specified OldValue exists in ValuesPerBlock, replace its value with NewValue.
static Value * ConstructSSAForLoadSet(LoadInst *Load, SmallVectorImpl< AvailableValueInBlock > &ValuesPerBlock, GVNPass &GVN)
Given a set of loads specified by ValuesPerBlock, construct SSA form, allowing us to eliminate Load.
@ Unavailable
We know the block is not fully available. This is a fixpoint.
@ Available
We know the block is fully available. This is a fixpoint.
@ SpeculativelyAvailable
We do not know whether the block is fully available or not, but we are currently speculating that it ...
static cl::opt< bool > GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden)
static cl::opt< uint32_t > MaxNumVisitedInsts("gvn-max-num-visited-insts", cl::Hidden, cl::init(100), cl::desc("Max number of visited instructions when trying to find " "dominating value of select dependency (default = 100)"))
static cl::opt< uint32_t > MaxBBSpeculations("gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::desc("Max number of blocks we're willing to speculate on (and recurse " "into) when deducing if a value is fully available or not in GVN " "(default = 600)"))
static cl::opt< bool > GVNEnableLoadPRE("enable-load-pre", cl::init(true))
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
This file implements a map that provides insertion order iteration.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
uint64_t IntrinsicInst * II
ppc ctr loops PowerPC CTR Loops Verify
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static const uint32_t IV[8]
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
bool isEHPad() const
Return true if this basic block is an exception handling block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents calls to the gc.relocate intrinsic.
This class holds the mapping between values and value numbers.
LLVM_ABI uint32_t lookupOrAdd(MemoryAccess *MA)
The core GVN pass object.
friend class gvn::GVNLegacyPass
LLVM_ABI bool isPREEnabled() const
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Run the pass over the function.
LLVM_ABI void salvageAndRemoveInstruction(Instruction *I)
This removes the specified instruction from our various maps and marks it for deletion.
AAResults * getAliasAnalysis() const
LLVM_ABI bool isLoadPREEnabled() const
GVNPass(GVNOptions Options={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI bool isMemorySSAEnabled() const
DominatorTree & getDominatorTree() const
LLVM_ABI bool isLoadInLoopPREEnabled() const
LLVM_ABI bool isLoadPRESplitBackedgeEnabled() const
LLVM_ABI bool isMemDepEnabled() const
Legacy wrapper pass to provide the GlobalsAAResult object.
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
bool isTerminator() const
LLVM_ABI bool mayReadFromMemory() const LLVM_READONLY
Return true if this instruction may read memory.
LLVM_ABI void dropUnknownNonDebugMetadata(ArrayRef< unsigned > KnownIDs={})
Drop all unknown metadata except for debug locations.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
The legacy pass manager's analysis pass to compute loop information.
iterator find(const KeyT &Key)
A memory dependence query can return one of three different answers.
bool isClobber() const
Tests if this MemDepResult represents a query that is an instruction clobber dependency.
bool isNonLocal() const
Tests if this MemDepResult represents a query that is transparent to the start of the block,...
bool isDef() const
Tests if this MemDepResult represents a query that is an instruction definition dependency.
bool isLocal() const
Tests if this MemDepResult represents a valid local query (Clobber/Def).
Instruction * getInst() const
If this is a normal dependency, returns the instruction that is depended on.
This is the common base class for memset/memcpy/memmove.
BasicBlock * getBlock() const
An analysis that produces MemoryDependenceResults for a function.
std::vector< NonLocalDepEntry > NonLocalDepInfo
MemDepResult getDependency(Instruction *QueryInst)
Returns the instruction on which a memory operation depends.
const NonLocalDepInfo & getNonLocalCallDependency(CallBase *QueryCall)
Perform a full dependency query for the specified call, returning the set of blocks that the value is...
A wrapper analysis pass for the legacy pass manager that exposes a MemoryDepnedenceResults instance.
Representation for a specific memory location.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
BasicBlock * getIncomingBlock(unsigned I) const
Return incoming basic block number i.
MemoryAccess * getIncomingValue(unsigned I) const
Return incoming value number x.
An analysis that produces MemorySSA for a function.
Legacy analysis pass which computes MemorySSA.
LLVM_ABI void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self consistent (IE definitions dominate all uses, uses appear in the right ...
This is an entry in the NonLocalDepInfo cache.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Helper class for SSA formation on a set of values defined in multiple blocks.
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
bool HasValueForBlock(BasicBlock *BB) const
Return true if the SSAUpdater already has a value for the specified block.
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
SmallVector & operator=(const SmallVector &RHS)
StringRef - Represent a constant reference to a string, i.e.
Analysis pass providing the TargetLibraryInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isTokenLikeTy() const
Returns true if this is 'token' or a token-like target type.s.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool canBeFreed() const
Return true if the memory object referred to by V can be freed in the scope for which the SSA value d...
LLVM_ABI void deleteValue()
Delete a pointer to a generic Value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
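A tiny sketch tying the Type and Value queries above together, in the spirit of the guards a pass runs before a replacement (the helper is illustrative):

#include <cassert>
#include "llvm/IR/Instructions.h"
using namespace llvm;

void replaceAllUses(Instruction *Old, Value *New) {
  assert(Old->getType() == New->getType() && "RAUW requires matching types");
  if (!Old->use_empty())
    Old->replaceAllUsesWith(New); // Every user of Old now sees New.
}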
An efficient, type-erasing, non-owning reference to a callable.
GVNLegacyPass(bool MemDepAnalysis=GVNEnableMemDep, bool MemSSAAnalysis=GVNEnableMemorySSA)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
An opaque object representing a hash code.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
Matches MaskedStore Intrinsic.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
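Note that some matchers listed above are the GlobalISel (MIPatternMatch) variants; GVN itself uses the IR-level llvm::PatternMatch equivalents. A minimal sketch with those, assuming a hypothetical classify() helper:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Recognize a logical and (possibly in select form) or a not-ed value.
bool classify(Value *V, Value *&A, Value *&B) {
  if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
    return true;                   // L && R
  if (match(V, m_Not(m_Value(A)))) // xor X, -1
    return true;
  return false;
}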
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the store at D...
Value * getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL)
If analyzeLoadFromClobberingMemInst returned an offset, this function can be used to actually perform...
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the load at De...
Value * getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, Function *F)
If analyzeLoadFromClobberingStore/Load returned an offset, this function can be used to actually perf...
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *DepMI, const DataLayout &DL)
This function determines whether a value for the pointer LoadPtr can be extracted from the memory int...
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, Function *F)
Return true if CoerceAvailableValueToLoadType would succeed if it was called.
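The VNCoercion entries above come in analyze/materialize pairs; a sketch of the store-forwarding pair, assuming a hypothetical tryForwardStore() wrapper:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
using namespace llvm;
using namespace llvm::VNCoercion;

// Returns nullptr when the analysis reports no usable offset (-1).
Value *tryForwardStore(LoadInst *Load, StoreInst *DepSI) {
  const DataLayout &DL = Load->getDataLayout();
  int Offset = analyzeLoadFromClobberingStore(
      Load->getType(), Load->getPointerOperand(), DepSI, DL);
  if (Offset < 0)
    return nullptr;
  return getValueForLoad(DepSI->getValueOperand(),
                         static_cast<unsigned>(Offset), Load->getType(),
                         /*InsertPt=*/Load, Load->getFunction());
}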
initializer< Ty > init(const Ty &Val)
A private "module" namespace for types and utilities used by GVN.
Add a small namespace to avoid name clashes with the classes used in the streaming interface.
NodeAddr< InstrNode * > Instr
NodeAddr< PhiNode * > Phi
NodeAddr< UseNode * > Use
NodeAddr< NodeBase * > Node
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
LLVM_ABI unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ)
Search for the specified successor of basic block BB and return its position in the terminator instru...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
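make_early_inc_range is the standard way to mutate a range while walking it; a minimal sketch (the dead-instruction sweep is illustrative):

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

void sweep(BasicBlock &BB) {
  // The iterator advances before the body runs, so erasing the current
  // instruction does not invalidate the traversal.
  for (Instruction &I : make_early_inc_range(BB))
    if (isInstructionTriviallyDead(&I))
      I.eraseFromParent();
}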
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume do not contain any valuable informat...
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
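simplifyInstruction and isInstructionTriviallyDead compose into the simplify-first step GVN applies per instruction; a hedged sketch (the trySimplify() wrapper is hypothetical):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// If I folds to an existing value, forward its uses and drop it.
bool trySimplify(Instruction *I, const SimplifyQuery &Q) {
  Value *V = simplifyInstruction(I, Q);
  if (!V)
    return false;
  I->replaceAllUsesWith(V);
  if (isInstructionTriviallyDead(I))
    I->eraseFromParent();
  return true;
}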
LLVM_ABI bool canReplacePointersInUseIfEqual(const Use &U, const Value *To, const DataLayout &DL)
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deeme...
bool isModSet(const ModRefInfo MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
LLVM_ABI void initializeGVNLegacyPassPass(PassRegistry &)
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
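replaceDominatedUsesWith is the workhorse of GVN's equality propagation; a minimal sketch of the edge-scoped form (the propagate() wrapper is illustrative):

#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// After deducing From == To whenever control flows Src -> Dst, rewrite
// every use of From dominated by that edge; returns the use count.
unsigned propagate(Value *From, Value *To, DominatorTree &DT,
                   BasicBlock *Src, BasicBlock *Dst) {
  BasicBlockEdge Root(Src, Dst);
  return replaceDominatedUsesWith(From, To, DT, Root);
}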
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
LLVM_ABI bool VerifyMemorySSA
Enables verification of MemorySSA.
RNSuccIterator< NodeRef, BlockT, RegionT > succ_end(NodeRef Node)
LLVM_ABI bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
LLVM_ABI bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, MemoryDependenceResults *MemDep=nullptr, bool PredecessorWithTwoSuccessors=false, DominatorTree *DT=nullptr)
Attempts to merge a block into its predecessor, if possible.
LLVM_ABI FunctionPass * createGVNPass()
Create a legacy GVN pass.
DWARFExpression::Operation Op
LLVM_ABI BasicBlock * SplitCriticalEdge(Instruction *TI, unsigned SuccNum, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
If this edge is a critical edge, insert a new node to split the critical edge.
LLVM_ABI bool isCriticalEdge(const Instruction *TI, unsigned SuccNum, bool AllowIdenticalEdges=false)
Return true if the specified edge is a critical edge.
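isCriticalEdge and SplitCriticalEdge pair up before code is placed on an edge, as in GVN's load PRE; a sketch assuming a hypothetical getInsertionBlock() helper:

#include "llvm/Analysis/CFG.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

// Returns the block that code for the Pred -> Succ edge may go into,
// or nullptr if a critical edge could not be split.
BasicBlock *getInsertionBlock(BasicBlock *Pred, BasicBlock *Succ) {
  Instruction *TI = Pred->getTerminator();
  unsigned SuccNum = GetSuccessorNumber(Pred, Succ);
  if (isCriticalEdge(TI, SuccNum))
    return SplitCriticalEdge(TI, SuccNum, CriticalEdgeSplittingOptions());
  return Pred;
}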
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool pred_empty(const BasicBlock *BB)
iterator_range< df_iterator< T > > depth_first(const T &G)
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
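hash_combine and hash_combine_range are how expression-style keys get hashed; a sketch using a simplified stand-in for the value table's Expression:

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

struct Expr { // Simplified stand-in, not the real type.
  unsigned Opcode;
  SmallVector<unsigned, 4> VarArgs;
};

hash_code hashExpr(const Expr &E) {
  // Fold the opcode together with the whole operand range.
  return hash_combine(E.Opcode,
                      hash_combine_range(E.VarArgs.begin(), E.VarArgs.end()));
}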
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static GVNPass::Expression getTombstoneKey()
static bool isEqual(const GVNPass::Expression &LHS, const GVNPass::Expression &RHS)
static GVNPass::Expression getEmptyKey()
static unsigned getHashValue(const GVNPass::Expression &E)
An information struct used to provide DenseMap with the various necessary components for a given valu...
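The four DenseMapInfo members above follow a fixed shape; a sketch for a hypothetical Key type (the empty and tombstone values only need to be distinct from every real key):

#include "llvm/ADT/DenseMap.h"

struct Key {
  unsigned Id;
  bool operator==(const Key &O) const { return Id == O.Id; }
};

namespace llvm {
template <> struct DenseMapInfo<Key> {
  static Key getEmptyKey() { return {~0U}; }         // Reserved sentinel.
  static Key getTombstoneKey() { return {~0U - 1}; } // Second sentinel.
  static unsigned getHashValue(const Key &K) { return K.Id * 37U; }
  static bool isEqual(const Key &L, const Key &R) { return L == R; }
};
} // namespace llvm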
A set of parameters to control various transforms performed by GVN pass.
bool operator==(const Expression &Other) const
friend hash_code hash_value(const Expression &Value)
SmallVector< uint32_t, 4 > VarArgs
Expression(uint32_t Op=~2U)
A CRTP mix-in to automatically provide informational APIs needed for passes.
Represents an AvailableValue which can be rematerialized at the end of the associated BasicBlock.
static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset=0)
static AvailableValueInBlock getUndef(BasicBlock *BB)
static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV)
AvailableValue AV
AV - The actual available value.
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel, Value *V1, Value *V2)
BasicBlock * BB
BB - The basic block in question.
Value * MaterializeAdjustedValue(LoadInst *Load) const
Emit code at the end of this block to adjust the value defined here to the specified type.
Represents a particular available value that we know how to materialize.
unsigned Offset
Offset - The byte offset in Val that is interesting for the load query.
bool isSimpleValue() const
static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2)
bool isCoercedLoadValue() const
static AvailableValue get(Value *V, unsigned Offset=0)
ValType Kind
Kind of the live-out value.
LoadInst * getCoercedLoadValue() const
static AvailableValue getLoad(LoadInst *Load, unsigned Offset=0)
static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset=0)
bool isUndefValue() const
bool isSelectValue() const
Value * Val
Val - The value that is live out of the block.
Value * V1
V1, V2 - The dominating non-clobbered values of SelectVal.
static AvailableValue getUndef()
SelectInst * getSelectValue() const
Value * getSimpleValue() const
bool isMemIntrinValue() const
MemIntrinsic * getMemIntrinValue() const
Value * MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt) const
Emit code at the specified insertion point to adjust the value defined here to the specified type.
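Putting the two structs together: a sketch of how the factories compose with materialization. AvailableValue and AvailableValueInBlock live in this file's gvn namespace, so the snippet is only meaningful inside GVN.cpp itself; the materializeIn() helper is hypothetical:

// Record that StoredVal (at byte Offset) satisfies Load in block BB,
// then emit any needed coercion code at the end of BB.
static Value *materializeIn(BasicBlock *BB, Value *StoredVal,
                            unsigned Offset, LoadInst *Load) {
  AvailableValueInBlock AVIB =
      AvailableValueInBlock::get(BB, AvailableValue::get(StoredVal, Offset));
  return AVIB.MaterializeAdjustedValue(Load);
}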