#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");
73 "Controls which instructions are removed");
77 cl::desc(
"Enable imprecision in EarlyCSE in pathological cases, in exchange "
78 "for faster compile. Caps the MemorySSA clobbering calls."));
82 cl::desc(
"Perform extra assertion checking to verify that SimpleValue's hash "
83 "function is well-behaved w.r.t. its isEqual predicate"));
    if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
      if (Function *F = CI->getCalledFunction()) {
        switch (F->getIntrinsicID()) {
        case Intrinsic::experimental_constrained_fadd:
        case Intrinsic::experimental_constrained_fsub:
        case Intrinsic::experimental_constrained_fmul:
        case Intrinsic::experimental_constrained_fdiv:
        case Intrinsic::experimental_constrained_frem:
        case Intrinsic::experimental_constrained_fptosi:
        case Intrinsic::experimental_constrained_sitofp:
        case Intrinsic::experimental_constrained_fptoui:
        case Intrinsic::experimental_constrained_uitofp:
        case Intrinsic::experimental_constrained_fcmp:
        case Intrinsic::experimental_constrained_fcmps: {
          auto *CFP = cast<ConstrainedFPIntrinsic>(CI);
          // Only safe to CSE if the rounding mode and exception behavior are
          // the defaults.
          return CFP->isDefaultFPEnvironment();
        }
        }
      }
      // Other calls must be readnone and produce a value.
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    }

    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
           isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
           isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
           isa<InsertValueInst>(Inst) || isa<FreezeInst>(Inst);
  static unsigned getHashValue(SimpleValue Val);
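// The hash canonicalizes commutable operations (binary operators, compares,
// and two-operand commutative intrinsics) by ordering their operands, so that
// expressions differing only by commutation hash identically.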
  if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
    std::swap(LHS, RHS);

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares commute by swapping the operands and the predicate; pick a
    // canonical order so both forms hash the same.
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (FreezeInst *FI = dyn_cast<FreezeInst>(Inst))
    return hash_combine(FI->getOpcode(), FI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));
  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst) ||
          isa<FreezeInst>(Inst)) &&
         "Invalid/unknown instruction");
  auto *II = dyn_cast<IntrinsicInst>(Inst);
  if (II && II->isCommutative() && II->arg_size() == 2) {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    if (LHS > RHS)
      std::swap(LHS, RHS);
    return hash_combine(II->getOpcode(), LHS, RHS);
  }
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(Inst))
    return hash_combine(GCR->getOpcode(), GCR->getOperand(0),
                        GCR->getBasePtr(), GCR->getDerivedPtr());
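// The equality predicate must agree with the hash above: anything the hash
// canonicalizes (commuted binary operators, swapped-predicate compares,
// commutative intrinsics) must also compare equal here.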
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;

  if (!LHSBinOp->isCommutative())
    return false;

  assert(isa<BinaryOperator>(RHSI) &&
         "same opcode, but different instruction type?");
  BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

  return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
         LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // A compare matches its commuted form when the predicate is swapped too.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }
  auto *LII = dyn_cast<IntrinsicInst>(LHSI);
  auto *RII = dyn_cast<IntrinsicInst>(RHSI);
  if (LII && RII && LII->getIntrinsicID() == RII->getIntrinsicID() &&
      LII->isCommutative() && LII->arg_size() == 2) {
    return LII->getArgOperand(0) == RII->getArgOperand(1) &&
           LII->getArgOperand(1) == RII->getArgOperand(0);
  }
  if (const GCRelocateInst *GCR1 = dyn_cast<GCRelocateInst>(LHSI))
    if (const GCRelocateInst *GCR2 = dyn_cast<GCRelocateInst>(RHSI))
      return GCR1->getOperand(0) == GCR2->getOperand(0) &&
             GCR1->getBasePtr() == GCR2->getBasePtr() &&
             GCR1->getDerivedPtr() == GCR2->getDerivedPtr();
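  // Selects need extra care: min/max patterns may match with commuted
  // operands, and select(C, A, B) can match a select with the inverted
  // condition and swapped arms.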
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;

  // Min/max patterns: the arms may match in either order.
  return ((LHSA == RHSA && LHSB == RHSB) ||
          (LHSA == RHSB && LHSB == RHSA));

  if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
    return true;

  // Inverted condition with swapped arms.
  if (LHSA == RHSB && LHSB == RHSA) {
    CallInst *CI = dyn_cast<CallInst>(Inst);

  static unsigned getHashValue(CallValue Val);

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
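// EarlyCSE pass state: one scoped hash table each for simple values, loads,
// invariant ranges, and read-only calls, all pushed and popped in step with
// the dominator-tree walk.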
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  ScopedHTType AvailableValues;

  // A load/store result that is available for reuse, tagged with the
  // generation of memory state in which it was recorded.
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  LoadHTType AvailableLoads;

  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  CallHTType AvailableCalls;

  // Generation of the current memory state; bumped whenever an instruction
  // may write to memory.
  unsigned CurrentGeneration = 0;
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  // Counts MemorySSA clobbering-access queries, capped by EarlyCSEMssaOptCap.
  unsigned ClobberCounter = 0;
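  // NodeScope opens one scope in each of the four scoped hash tables; when it
  // is destroyed, everything made available while visiting the corresponding
  // dominator-tree node is forgotten again.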
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
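  // StackNode pairs a NodeScope with the bookkeeping (generation at entry,
  // child iterator) needed to walk the dominator tree iteratively rather than
  // recursively.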
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::const_iterator child,
              DomTreeNode::const_iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls) {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    unsigned currentGeneration() const { return CurrentGeneration; }
    unsigned childGeneration() const { return ChildGeneration; }

    bool isProcessed() const { return Processed; }
    void process() { Processed = true; }

    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    bool Processed = false;
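  // ParseMemoryInst gives plain loads/stores and the handled memory
  // intrinsics (target intrinsics plus masked.load/masked.store) a common
  // interface: pointer operand, matching id, read/write, atomicity,
  // volatility.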
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
        IntrID = II->getIntrinsicID();
        if (TTI.getTgtMemIntrinsic(II, Info))
          return;
        if (isHandledNonTargetIntrinsic(IntrID)) {
          switch (IntrID) {
          case Intrinsic::masked_load:
            Info.PtrVal = Inst->getOperand(0);
            Info.MatchingId = Intrinsic::masked_load;
            Info.ReadMem = true;
            Info.WriteMem = false;
            Info.IsVolatile = false;
            break;
          case Intrinsic::masked_store:
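            // The masked_load id is reused on purpose: it lets masked loads
            // and masked stores match each other while keeping them separate
            // from ordinary loads and stores.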
            Info.PtrVal = Inst->getOperand(1);
            Info.MatchingId = Intrinsic::masked_load;
            Info.ReadMem = false;
            Info.WriteMem = true;
            Info.IsVolatile = false;
            break;
    bool isLoad() const {
      if (IntrID != 0)
        return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IntrID != 0)
        return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IntrID != 0)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IntrID != 0)
        return Info.isUnordered();
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer.
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IntrID != 0)
        return Info.IsVolatile;
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer.
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }
    int getMatchingId() const {
      if (IntrID != 0)
        return Info.MatchingId;
      return -1;
    }

    Type *getValueType() const {
      if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::masked_load:
          return II->getType();
        case Intrinsic::masked_store:
          return II->getArgOperand(0)->getType();
        default:
          return nullptr;
        }
      }
      return getLoadStoreType(Inst);
    }

    bool mayReadFromMemory() const {
      if (IntrID != 0)
        return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IntrID != 0)
        return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }
  static bool isHandledNonTargetIntrinsic(Intrinsic::ID ID) {
    switch (ID) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
      return true;
    }
    return false;
  }

  static bool isHandledNonTargetIntrinsic(const Value *V) {
    if (auto *II = dyn_cast<IntrinsicInst>(V))
      return isHandledNonTargetIntrinsic(II->getIntrinsicID());
    return false;
  }
  Value *getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
                          unsigned CurrentGeneration);

  bool overridingStores(const ParseMemoryInst &Earlier,
                        const ParseMemoryInst &Later);
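  // getOrCreateResult extracts a value of ExpectedType from a previously seen
  // load, store, or handled memory intrinsic so it can replace a later load.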
  Value *getOrCreateResult(Instruction *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI->getType() == ExpectedType ? LI : nullptr;
    else if (auto *SI = dyn_cast<StoreInst>(Inst)) {
      Value *V = SI->getValueOperand();
      return V->getType() == ExpectedType ? V : nullptr;
    }
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    auto *II = cast<IntrinsicInst>(Inst);
    if (isHandledNonTargetIntrinsic(II->getIntrinsicID()))
      return getOrCreateResultNonTargetMemIntrinsic(II, ExpectedType);
    return TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType);
  }
  Value *getOrCreateResultNonTargetMemIntrinsic(IntrinsicInst *II,
                                                Type *ExpectedType) const {
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_load:
      return II;
    case Intrinsic::masked_store:
      return II->getOperand(0);
    }
    return nullptr;
  }

  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);
    auto IsSubmask = [](const Value *Mask0, const Value *Mask1) {
      // Is every lane enabled in Mask0 also enabled in Mask1?
      if (Mask0 == Mask1)
        return true;
      if (isa<UndefValue>(Mask0) || isa<UndefValue>(Mask1))
        return false;
      auto *Vec0 = dyn_cast<ConstantVector>(Mask0);
      auto *Vec1 = dyn_cast<ConstantVector>(Mask1);
      if (!Vec0 || !Vec1)
        return false;
      assert(Vec0->getType() == Vec1->getType() &&
             "Masks should have the same type");
      for (int i = 0, e = Vec0->getNumOperands(); i != e; ++i) {
        Constant *Elem0 = Vec0->getOperand(i);
        Constant *Elem1 = Vec1->getOperand(i);
        auto *Int0 = dyn_cast<ConstantInt>(Elem0);
        if (Int0 && Int0->isZero())
          continue;
        auto *Int1 = dyn_cast<ConstantInt>(Elem1);
        if (Int1 && !Int1->isZero())
          continue;
        if (isa<UndefValue>(Elem0) || isa<UndefValue>(Elem1))
          return false;
    if (PtrOp(Earlier) != PtrOp(Later))
      return false;
    // Load-after-load.
    if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_load) {
      if (MaskOp(Earlier) == MaskOp(Later) && ThruOp(Earlier) == ThruOp(Later))
        return true;
      if (!isa<UndefValue>(ThruOp(Later)))
        return false;
      return IsSubmask(MaskOp(Later), MaskOp(Earlier));
    }
    // Load-after-store.
    if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_load) {
      if (!IsSubmask(MaskOp(Later), MaskOp(Earlier)))
        return false;
      return isa<UndefValue>(ThruOp(Later));
    }
    // Store-after-load.
    if (IDE == Intrinsic::masked_load && IDL == Intrinsic::masked_store) {
      return IsSubmask(MaskOp(Later), MaskOp(Earlier));
    }
    // Store-after-store.
    if (IDE == Intrinsic::masked_store && IDL == Intrinsic::masked_store) {
      return IsSubmask(MaskOp(Earlier), MaskOp(Later));
    }
  MSSAUpdater->removeMemoryAccess(&Inst, true);
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // If the generations match, nothing that may write memory intervened.
  if (EarlierGeneration == LaterGeneration)
    return true;

    LaterDef = LaterMA->getDefiningAccess();

  // The earlier value is still good if the later access's defining access
  // dominates the earlier access: no write clobbered memory in between.
  return MSSA->dominates(LaterDef, EarlierMA);
bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // Invariant loads are trivially invariant at any generation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  if (!AvailableInvariants.count(MemLoc))
    return false;

  // The location is invariant if an invariant.start covering it was recorded
  // no later than the generation we are asking about.
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
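// handleBranchCondition: when a block's single predecessor branches to it on a
// condition, the condition has a known value on this edge; record it (and,
// through logical and/or chains, its operands) in AvailableValues.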
bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI,
                                     const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  auto MatchBinOp = [](Instruction *I, unsigned Opcode, Value *&LHS,
                       Value *&RHS) {
    if (Opcode == Instruction::And &&
        match(I, m_LogicalAnd(m_Value(LHS), m_Value(RHS))))
      return true;
    else if (Opcode == Instruction::Or &&
             match(I, m_LogicalOr(m_Value(LHS), m_Value(RHS))))
      return true;
    return false;
  };

  // On the true edge every operand of an 'and' condition is true; on the
  // false edge every operand of an 'or' condition is false.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");

    if (MatchBinOp(Curr, PropagateOpcode, LHS, RHS))
      for (auto *Op : { LHS, RHS })
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
                                  unsigned CurrentGeneration) {
  if (InVal.DefInst == nullptr)
    return nullptr;
  if (InVal.MatchingId != MemInst.getMatchingId())
    return nullptr;
  // We don't CSE volatile or ordered accesses.
  if (MemInst.isVolatile() || !MemInst.isUnordered())
    return nullptr;
  // An atomic load can't reuse the result of a non-atomic access.
  if (MemInst.isLoad() && !InVal.IsAtomic && MemInst.isAtomic())
    return nullptr;

  bool MemInstMatching = !MemInst.isLoad();
  Instruction *Matching = MemInstMatching ? MemInst.get() : InVal.DefInst;
  Instruction *Other = MemInstMatching ? InVal.DefInst : MemInst.get();

  // For stores, the value being stored has to match the earlier value.
  Value *Result = MemInst.isStore()
                      ? getOrCreateResult(Matching, Other->getType())
                      : nullptr;
  if (MemInst.isStore() && InVal.DefInst != Result)
    return nullptr;
  // Handled non-target intrinsics (masked load/store) only match each other.
  bool MatchingNTI = isHandledNonTargetIntrinsic(Matching);
  bool OtherNTI = isHandledNonTargetIntrinsic(Other);
  if (OtherNTI != MatchingNTI)
    return nullptr;
  if (OtherNTI && MatchingNTI) {
    if (!isNonTargetIntrinsicMatch(cast<IntrinsicInst>(InVal.DefInst),
                                   cast<IntrinsicInst>(MemInst.get())))
      return nullptr;
  }

  // The earlier value must still be live: either the memory is known
  // invariant, or nothing may have written it between the two generations.
  if (!isOperatingOnInvariantMemAt(MemInst.get(), InVal.Generation) &&
      !isSameMemGeneration(InVal.Generation, CurrentGeneration, InVal.DefInst,
                           MemInst.get()))
    return nullptr;

  if (!Result)
    Result = getOrCreateResult(Matching, Other->getType());
  return Result;
bool EarlyCSE::overridingStores(const ParseMemoryInst &Earlier,
                                const ParseMemoryInst &Later) {
  assert(Earlier.isUnordered() && !Earlier.isVolatile() &&
         "Violated invariant");
  if (Earlier.getPointerOperand() != Later.getPointerOperand())
    return false;
  if (!Earlier.getValueType() || !Later.getValueType() ||
      Earlier.getValueType() != Later.getValueType())
    return false;
  if (Earlier.getMatchingId() != Later.getMatchingId())
    return false;
  // Ordered or volatile stores are never removed this way.
  if (!Earlier.isUnordered() || !Later.isUnordered())
    return false;

  // Masked stores only override masked stores whose mask they cover.
  bool ENTI = isHandledNonTargetIntrinsic(Earlier.get());
  bool LNTI = isHandledNonTargetIntrinsic(Later.get());
  if (ENTI && LNTI)
    return isNonTargetIntrinsicMatch(cast<IntrinsicInst>(Earlier.get()),
                                     cast<IntrinsicInst>(Later.get()));

  return ENTI == LNTI;
}
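// processNode walks one basic block: it simplifies and CSE's instructions,
// records newly available values, and advances the memory generation across
// anything that may write memory.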
bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;

  // With multiple predecessors, the parent's live-out memory values may have
  // been invalidated along another edge; conservatively start a new
  // generation.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If the single predecessor branches here on a condition, that condition
  // has a known value in this block.
  auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
  if (CondInst && SimpleValue::canHandle(CondInst))
    Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    // An llvm.assume is never CSE'd, but its condition becomes known-true.
    if (auto *Assume = dyn_cast<AssumeInst>(&Inst)) {
      auto *CondI = dyn_cast<Instruction>(Assume->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << Inst << '\n');
      continue;
    }
    // Likewise skip noalias scope declarations and llvm.sideeffect markers.
    if (match(&Inst,
              m_Intrinsic<Intrinsic::experimental_noalias_scope_decl>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping noalias intrinsic: " << Inst
                        << '\n');
      continue;
    }

    if (match(&Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << Inst << '\n');
      continue;
    }
    if (match(&Inst, m_Intrinsic<Intrinsic::pseudoprobe>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping pseudoprobe: " << Inst << '\n');
      continue;
    }
    // An invariant.start with no uses (hence no invariant.end) makes its
    // location invariant from this generation onward.
    if (match(&Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
    // Guards: a guard whose condition is already known true is removable;
    // otherwise the known value can be substituted into it.
    if (match(&Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << Inst << '\n');
            } else
              cast<CallInst>(Inst).setArgOperand(0, KnownCond);
      // Guards read all memory but write none; clear LastStore so the store
      // is not DSE'd across the guard.
      LastStore = nullptr;
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << " to: " << *V
                        << '\n');

      bool Killed = false;
    if (SimpleValue::canHandle(&Inst)) {
      // See if the instruction already has an available value.
      if (Value *V = AvailableValues.lookup(&Inst)) {
        if (auto *I = dyn_cast<Instruction>(V)) {
          // Keep only the IR flags present on both instructions so the
          // replacement is valid at every use.
          I->andIRFlags(&Inst);
        }
        Inst.replaceAllUsesWith(V);
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(&Inst, &Inst);
    ParseMemoryInst MemInst(&Inst, TTI);
    if (MemInst.isValid() && MemInst.isLoad()) {
      // Ordered or volatile loads act as a barrier: forget LastStore and
      // start a new memory generation.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // An invariant load implies its location stays invariant; remember it
        // if we haven't already.
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }
      // If we have an available version of this load, and if it is the right
      // generation, replace this instruction.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (Value *Op = getMatchingValue(InVal, MemInst, CurrentGeneration)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
                          << " to: " << *InVal.DefInst << '\n');

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPointerOperand(),
                            LoadValue(&Inst, CurrentGeneration,
                                      MemInst.getMatchingId(),
                                      MemInst.isAtomic()));
      LastStore = nullptr;

    // If this instruction may read from memory, it can observe LastStore,
    // so that store is not trivially dead.
    if (Inst.mayReadFromMemory() &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;
    // Read-only calls are CSE'able if their memory generation has not
    // changed since the earlier call.
    if (CallValue::canHandle(&Inst)) {
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(&Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              &Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << Inst
                          << " to: " << *InVal.first << '\n');

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(&Inst, std::make_pair(&Inst, CurrentGeneration));
    if (auto *FI = dyn_cast<FenceInst>(&Inst))
    if (MemInst.isValid() && MemInst.isStore()) {
      // A store of the value we just loaded from the same location (with no
      // intervening clobber) is a no-op "writeback" store; delete it.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getMatchingValue(InVal, MemInst, CurrentGeneration)) {
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << Inst << '\n');
      // This instruction may modify memory; the available memory values can
      // no longer be trusted, so bump the generation count.
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // A store that is immediately overwritten by this store to the same
        // location is trivially dead.
        if (overridingStores(ParseMemoryInst(LastStore, TTI), MemInst)) {
          LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                            << " due to: " << Inst << '\n');
          removeMSSA(*LastStore);

        LastStore = nullptr;
        // The stored value becomes the available value for this pointer, so
        // later loads of it can be rewritten to use it.
        AvailableLoads.insert(MemInst.getPointerOperand(),
                              LoadValue(&Inst, CurrentGeneration,
                                        MemInst.getMatchingId(),
                                        MemInst.isAtomic()));

        // Only ordinary, unordered stores are candidates for later DSE.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = &Inst;
        else
          LastStore = nullptr;
bool EarlyCSE::run() {
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Seed the stack with the root of the dominator tree.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");
  while (!nodesToProcess.empty()) {
    // Grab the top of the stack and restore the generation seen on entry.
    StackNode *NodeToProcess = nodesToProcess.back();
    CurrentGeneration = NodeToProcess->currentGeneration();

    if (!NodeToProcess->isProcessed()) {
      // First visit: process the block itself.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next dominator-tree child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // Fully processed: pop it, which also pops its hash-table scopes.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  }
  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

void EarlyCSEPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<EarlyCSEPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
template <bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template <>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)