LLVM 16.0.0git
CodeGenPrepare.cpp
1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/CodeGen/Analysis.h"
41 #include "llvm/Config/llvm-config.h"
42 #include "llvm/IR/Argument.h"
43 #include "llvm/IR/Attributes.h"
44 #include "llvm/IR/BasicBlock.h"
45 #include "llvm/IR/Constant.h"
46 #include "llvm/IR/Constants.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfo.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GlobalValue.h"
54 #include "llvm/IR/GlobalVariable.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InlineAsm.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instruction.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/IntrinsicsAArch64.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/MDBuilder.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/ProfDataUtils.h"
69 #include "llvm/IR/Statepoint.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/IR/ValueHandle.h"
75 #include "llvm/IR/ValueMap.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Pass.h"
80 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/Compiler.h"
83 #include "llvm/Support/Debug.h"
95 #include <algorithm>
96 #include <cassert>
97 #include <cstdint>
98 #include <iterator>
99 #include <limits>
100 #include <memory>
101 #include <optional>
102 #include <utility>
103 #include <vector>
104 
105 using namespace llvm;
106 using namespace llvm::PatternMatch;
107 
108 #define DEBUG_TYPE "codegenprepare"
109 
110 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
111 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
112 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
113 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
114  "sunken Cmps");
115 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
116  "of sunken Casts");
117 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
118  "computations were sunk");
119 STATISTIC(NumMemoryInstsPhiCreated,
120  "Number of phis created when address "
121  "computations were sunk to memory instructions");
122 STATISTIC(NumMemoryInstsSelectCreated,
123  "Number of selects created when address "
124  "computations were sunk to memory instructions");
125 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
126 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
127 STATISTIC(NumAndsAdded,
128  "Number of and mask instructions added to form ext loads");
129 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
130 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
131 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
132 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
133 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
134 
135 static cl::opt<bool> DisableBranchOpts(
136  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
137  cl::desc("Disable branch optimizations in CodeGenPrepare"));
138 
139 static cl::opt<bool>
140  DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
141  cl::desc("Disable GC optimizations in CodeGenPrepare"));
142 
143 static cl::opt<bool>
144  DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
145  cl::init(false),
146  cl::desc("Disable select to branch conversion."));
147 
148 static cl::opt<bool>
149  AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
150  cl::desc("Address sinking in CGP using GEPs."));
151 
152 static cl::opt<bool>
153  EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
154  cl::desc("Enable sinking and/cmp into branches."));
155 
156 static cl::opt<bool> DisableStoreExtract(
157  "disable-cgp-store-extract", cl::Hidden, cl::init(false),
158  cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
159 
160 static cl::opt<bool> StressStoreExtract(
161  "stress-cgp-store-extract", cl::Hidden, cl::init(false),
162  cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
163 
164 static cl::opt<bool> DisableExtLdPromotion(
165  "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
166  cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
167  "CodeGenPrepare"));
168 
169 static cl::opt<bool> StressExtLdPromotion(
170  "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
171  cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
172  "optimization in CodeGenPrepare"));
173 
174 static cl::opt<bool> DisablePreheaderProtect(
175  "disable-preheader-prot", cl::Hidden, cl::init(false),
176  cl::desc("Disable protection against removing loop preheaders"));
177 
178 static cl::opt<bool> ProfileGuidedSectionPrefix(
179  "profile-guided-section-prefix", cl::Hidden, cl::init(true),
180  cl::desc("Use profile info to add section prefix for hot/cold functions"));
181 
182 static cl::opt<bool> ProfileUnknownInSpecialSection(
183  "profile-unknown-in-special-section", cl::Hidden,
184  cl::desc("In a profiling mode like sampleFDO, if a function doesn't have a "
185  "profile, we cannot tell for sure that the function is cold, "
186  "because it may be a newly added function that was never sampled. "
187  "With this flag enabled, the compiler can put such profile-unknown "
188  "functions into a special section, so the runtime system can "
189  "choose to handle them differently from the .text section, to "
190  "save RAM for example."));
191 
192 static cl::opt<bool> BBSectionsGuidedSectionPrefix(
193  "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
194  cl::desc("Use the basic-block-sections profile to determine the text "
195  "section prefix for hot functions. Functions with "
196  "basic-block-sections profile will be placed in `.text.hot` "
197  "regardless of their FDO profile info. Other functions won't be "
198  "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
199  "profiles."));
200 
201 static cl::opt<uint64_t> FreqRatioToSkipMerge(
202  "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
203  cl::desc("Skip merging empty blocks if (frequency of empty block) / "
204  "(frequency of destination block) is greater than this ratio"));
205 
206 static cl::opt<bool> ForceSplitStore(
207  "force-split-store", cl::Hidden, cl::init(false),
208  cl::desc("Force store splitting no matter what the target query says."));
209 
210 static cl::opt<bool> EnableTypePromotionMerge(
211  "cgp-type-promotion-merge", cl::Hidden,
212  cl::desc("Enable merging of redundant sexts when one is dominating"
213  " the other."),
214  cl::init(true));
215 
216 static cl::opt<bool> DisableComplexAddrModes(
217  "disable-complex-addr-modes", cl::Hidden, cl::init(false),
218  cl::desc("Disables combining addressing modes with different parts "
219  "in optimizeMemoryInst."));
220 
221 static cl::opt<bool>
222  AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
223  cl::desc("Allow creation of Phis in Address sinking."));
224 
225 static cl::opt<bool> AddrSinkNewSelects(
226  "addr-sink-new-select", cl::Hidden, cl::init(true),
227  cl::desc("Allow creation of selects in Address sinking."));
228 
229 static cl::opt<bool> AddrSinkCombineBaseReg(
230  "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
231  cl::desc("Allow combining of BaseReg field in Address sinking."));
232 
233 static cl::opt<bool> AddrSinkCombineBaseGV(
234  "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
235  cl::desc("Allow combining of BaseGV field in Address sinking."));
236 
237 static cl::opt<bool> AddrSinkCombineBaseOffs(
238  "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
239  cl::desc("Allow combining of BaseOffs field in Address sinking."));
240 
241 static cl::opt<bool> AddrSinkCombineScaledReg(
242  "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
243  cl::desc("Allow combining of ScaledReg field in Address sinking."));
244 
245 static cl::opt<bool>
246  EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
247  cl::init(true),
248  cl::desc("Enable splitting large offset of GEP."));
249 
250 static cl::opt<bool> EnableICMP_EQToICMP_ST(
251  "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
252  cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
253 
254 static cl::opt<bool>
255  VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
256  cl::desc("Enable BFI update verification for "
257  "CodeGenPrepare."));
258 
259 static cl::opt<bool>
260  OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(false),
261  cl::desc("Enable converting phi types in CodeGenPrepare"));
262 
263 static cl::opt<unsigned>
264  HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
265  cl::desc("The least number of basic blocks for a function to be considered huge."));
266 
267 namespace {
268 
269 enum ExtType {
270  ZeroExtension, // Zero extension has been seen.
271  SignExtension, // Sign extension has been seen.
272  BothExtension // This extension type is used if we saw sext after
273  // ZeroExtension had been set, or if we saw zext after
274  // SignExtension had been set. It makes the type
275  // information of a promoted instruction invalid.
276 };
277 
278 enum ModifyDT {
279  NotModifyDT, // Does not modify any dominator tree.
280  ModifyBBDT,  // Modifies the basic-block-level dominator tree.
281  ModifyInstDT // Modifies instruction dominance within a basic block.
282  // This usually means we moved/deleted/inserted an instruction
283  // in a basic block, so we should re-iterate the instructions
284  // in such a basic block.
285 };
286 
287 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
288 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
289 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
290 using SExts = SmallVector<Instruction *, 16>;
291 using ValueToSExts = MapVector<Value *, SExts>;
292 
293 class TypePromotionTransaction;
294 
295 class CodeGenPrepare : public FunctionPass {
296  const TargetMachine *TM = nullptr;
297  const TargetSubtargetInfo *SubtargetInfo;
298  const TargetLowering *TLI = nullptr;
299  const TargetRegisterInfo *TRI;
300  const TargetTransformInfo *TTI = nullptr;
301  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
302  const TargetLibraryInfo *TLInfo;
303  const LoopInfo *LI;
304  std::unique_ptr<BlockFrequencyInfo> BFI;
305  std::unique_ptr<BranchProbabilityInfo> BPI;
306  ProfileSummaryInfo *PSI;
307 
308  /// As we scan instructions optimizing them, this is the next instruction
309  /// to optimize. Transforms that can invalidate this should update it.
310  BasicBlock::iterator CurInstIterator;
311 
312  /// Keeps track of non-local addresses that have been sunk into a block.
313  /// This allows us to avoid inserting duplicate code for blocks with
314  /// multiple load/stores of the same address. The usage of WeakTrackingVH
315  /// enables SunkAddrs to be treated as a cache whose entries can be
316  /// invalidated if a sunken address computation has been erased.
317  ValueMap<Value *, WeakTrackingVH> SunkAddrs;
318 
319  /// Keeps track of all instructions inserted for the current function.
320  SetOfInstrs InsertedInsts;
321 
322  /// Keeps track of the original type of each instruction before its
323  /// promotion, for the current function.
324  InstrToOrigTy PromotedInsts;
325 
326  /// Keep track of instructions removed during promotion.
327  SetOfInstrs RemovedInsts;
328 
329  /// Keep track of sext chains based on their initial value.
330  DenseMap<Value *, Instruction *> SeenChainsForSExt;
331 
332  /// Keep track of GEPs accessing the same data structures such as structs or
333  /// arrays that are candidates to be split later because of their large
334  /// size.
335  MapVector<AssertingVH<Value>,
336            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
337      LargeOffsetGEPMap;
338 
339  /// Keep track of new GEP base after splitting the GEPs having large offset.
340  SmallSet<AssertingVH<Value>, 2> NewGEPBases;
341 
342  /// Map serial numbers to Large offset GEPs.
343  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
344 
345  /// Keep track of SExt promoted.
346  ValueToSExts ValToSExtendedUses;
347 
348  /// True if the function has the OptSize attribute.
349  bool OptSize;
350 
351  /// DataLayout for the Function being processed.
352  const DataLayout *DL = nullptr;
353 
354  /// Building the dominator tree can be expensive, so we only build it
355  /// lazily and update it when required.
356  std::unique_ptr<DominatorTree> DT;
357 
358 public:
359  /// If we encounter a huge function, we need to limit the build time.
360  bool IsHugeFunc = false;
361 
362  /// FreshBBs is like a worklist; it collects the updated BBs that need
363  /// to be optimized again.
364  /// Note: To limit build time in this pass, when a BB is updated we need
365  /// to insert it into FreshBBs for huge functions.
366  SmallSet<BasicBlock *, 32> FreshBBs;
367 
368  static char ID; // Pass identification, replacement for typeid
369 
370  CodeGenPrepare() : FunctionPass(ID) {
371    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
372  }
373 
374  bool runOnFunction(Function &F) override;
375 
376  StringRef getPassName() const override { return "CodeGen Prepare"; }
377 
378  void getAnalysisUsage(AnalysisUsage &AU) const override {
379  // FIXME: When we can selectively preserve passes, preserve the domtree.
380    AU.addRequired<ProfileSummaryInfoWrapperPass>();
381    AU.addRequired<TargetLibraryInfoWrapperPass>();
382    AU.addRequired<TargetPassConfig>();
383    AU.addRequired<TargetTransformInfoWrapperPass>();
384    AU.addRequired<LoopInfoWrapperPass>();
385    AU.addUsedIfAvailable<BasicBlockSectionsProfileReader>();
386  }
387 
388 private:
389  template <typename F>
390  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
391  // Substituting can cause recursive simplifications, which can invalidate
392  // our iterator. Use a WeakTrackingVH to hold onto it in case this
393  // happens.
394  Value *CurValue = &*CurInstIterator;
395  WeakTrackingVH IterHandle(CurValue);
396 
397  f();
398 
399  // If the iterator instruction was recursively deleted, start over at the
400  // start of the block.
401  if (IterHandle != CurValue) {
402  CurInstIterator = BB->begin();
403  SunkAddrs.clear();
404  }
405  }
406 
407  // Get the DominatorTree, building if necessary.
408  DominatorTree &getDT(Function &F) {
409  if (!DT)
410  DT = std::make_unique<DominatorTree>(F);
411  return *DT;
412  }
413 
414  void removeAllAssertingVHReferences(Value *V);
415  bool eliminateAssumptions(Function &F);
416  bool eliminateFallThrough(Function &F);
417  bool eliminateMostlyEmptyBlocks(Function &F);
418  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
419  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
420  void eliminateMostlyEmptyBlock(BasicBlock *BB);
421  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
422  bool isPreheader);
423  bool makeBitReverse(Instruction &I);
424  bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
425  bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
426  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
427  unsigned AddrSpace);
428  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
429  bool optimizeInlineAsmInst(CallInst *CS);
430  bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
431  bool optimizeExt(Instruction *&I);
432  bool optimizeExtUses(Instruction *I);
433  bool optimizeLoadExt(LoadInst *Load);
434  bool optimizeShiftInst(BinaryOperator *BO);
435  bool optimizeFunnelShift(IntrinsicInst *Fsh);
436  bool optimizeSelectInst(SelectInst *SI);
437  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
438  bool optimizeSwitchType(SwitchInst *SI);
439  bool optimizeSwitchPhiConstants(SwitchInst *SI);
440  bool optimizeSwitchInst(SwitchInst *SI);
441  bool optimizeExtractElementInst(Instruction *Inst);
442  bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
443  bool fixupDbgValue(Instruction *I);
444  bool placeDbgValues(Function &F);
445  bool placePseudoProbes(Function &F);
446  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
447  LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
448  bool tryToPromoteExts(TypePromotionTransaction &TPT,
449  const SmallVectorImpl<Instruction *> &Exts,
450  SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
451  unsigned CreatedInstsCost = 0);
452  bool mergeSExts(Function &F);
453  bool splitLargeGEPOffsets();
454  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
455  SmallPtrSetImpl<Instruction *> &DeletedInstrs);
456  bool optimizePhiTypes(Function &F);
457  bool performAddressTypePromotion(
458  Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
459  bool HasPromoted, TypePromotionTransaction &TPT,
460  SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
461  bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
462  bool simplifyOffsetableRelocate(GCStatepointInst &I);
463 
464  bool tryToSinkFreeOperands(Instruction *I);
465  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
466  CmpInst *Cmp, Intrinsic::ID IID);
467  bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
468  bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
469  bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
470  void verifyBFIUpdates(Function &F);
471 };
472 
473 } // end anonymous namespace
474 
475 char CodeGenPrepare::ID = 0;
476 
477 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
478  "Optimize for code generation", false, false)
479 INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReader)
480 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
481 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
482 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
483 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
484 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
485 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, "Optimize for code generation",
486                     false, false)
487 
488 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
489 
490 bool CodeGenPrepare::runOnFunction(Function &F) {
491  if (skipFunction(F))
492  return false;
493 
494  DL = &F.getParent()->getDataLayout();
495 
496  bool EverMadeChange = false;
497  // Clear per function information.
498  InsertedInsts.clear();
499  PromotedInsts.clear();
500  FreshBBs.clear();
501 
502  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
503  SubtargetInfo = TM->getSubtargetImpl(F);
504  TLI = SubtargetInfo->getTargetLowering();
505  TRI = SubtargetInfo->getRegisterInfo();
506  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
507  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
508  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
509  BPI.reset(new BranchProbabilityInfo(F, *LI));
510  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
511  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
512  BBSectionsProfileReader =
513  getAnalysisIfAvailable<BasicBlockSectionsProfileReader>();
514  OptSize = F.hasOptSize();
515  // Use the basic-block-sections profile to promote hot functions to .text.hot
516  // if requested.
517  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
518  BBSectionsProfileReader->isFunctionHot(F.getName())) {
519  F.setSectionPrefix("hot");
520  } else if (ProfileGuidedSectionPrefix) {
521  // The hot attribute overrides profile-count-based hotness, while
522  // profile-count-based hotness overrides the cold attribute.
523  // This is a conservative behavior.
524  if (F.hasFnAttribute(Attribute::Hot) ||
525  PSI->isFunctionHotInCallGraph(&F, *BFI))
526  F.setSectionPrefix("hot");
527  // If PSI shows this function is not hot, we place the function into the
528  // unlikely section if (1) PSI shows this is a cold function, or
529  // (2) the function has the cold attribute.
530  else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
531  F.hasFnAttribute(Attribute::Cold))
532  F.setSectionPrefix("unlikely");
535  F.setSectionPrefix("unknown");
536  }
537 
538  /// This optimization identifies DIV instructions that can be
539  /// profitably bypassed and carried out with a shorter, faster divide.
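 // For example, on a target where 64-bit division is slow, a 64-bit udiv whose
 // operands happen to fit in 32 bits at run time can be guarded by a cheap
 // range check and dispatched to a 32-bit divide instead (see
 // bypassSlowDivision()).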
540  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
541  const DenseMap<unsigned int, unsigned int> &BypassWidths =
542  TLI->getBypassSlowDivWidths();
543  BasicBlock *BB = &*F.begin();
544  while (BB != nullptr) {
545  // bypassSlowDivision may create new BBs, but we don't want to reapply the
546  // optimization to those blocks.
547  BasicBlock *Next = BB->getNextNode();
548  // F.hasOptSize is already checked in the outer if statement.
549  if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
550  EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
551  BB = Next;
552  }
553  }
554 
555  // Get rid of @llvm.assume builtins before attempting to eliminate empty
556  // blocks, since there might be blocks that only contain @llvm.assume calls
557  // (plus arguments that we can get rid of).
558  EverMadeChange |= eliminateAssumptions(F);
559 
560  // Eliminate blocks that contain only PHI nodes and an
561  // unconditional branch.
562  EverMadeChange |= eliminateMostlyEmptyBlocks(F);
563 
564  ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
565  if (!DisableBranchOpts)
566  EverMadeChange |= splitBranchCondition(F, ModifiedDT);
567 
568  // Split some critical edges where one of the sources is an indirect branch,
569  // to help generate sane code for PHIs involving such edges.
570  EverMadeChange |=
571  SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
572 
573  // If we are optimizing a huge function, we need to consider the build time,
574  // because the basic algorithm's complexity is close to O(N!).
575  IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;
576 
577  bool MadeChange = true;
578  bool FuncIterated = false;
579  while (MadeChange) {
580  MadeChange = false;
581  DT.reset();
582 
583    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
584      if (FuncIterated && !FreshBBs.contains(&BB))
585  continue;
586 
587  ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
588  bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);
589 
590  MadeChange |= Changed;
591  if (IsHugeFunc) {
592  // If the BB is updated, it may still have a chance to be optimized.
593  // This usually happens during sink optimization.
594  // For example:
595  //
596  // bb0:
597  // %and = and i32 %a, 4
598  // %cmp = icmp eq i32 %and, 0
599  //
600  // If %cmp is sunk into another BB, %and will then have a chance to sink too.
601  if (Changed)
602  FreshBBs.insert(&BB);
603  else if (FuncIterated)
604  FreshBBs.erase(&BB);
605 
606  if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
607  DT.reset();
608  } else {
609  // For small/normal functions, we restart BB iteration if the dominator
610  // tree of the Function was changed.
611  if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
612  break;
613  }
614  }
615  // We have iterated over all the BBs in the function (only relevant for huge functions).
616  FuncIterated = IsHugeFunc;
617 
618  if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
619  MadeChange |= mergeSExts(F);
620  if (!LargeOffsetGEPMap.empty())
621  MadeChange |= splitLargeGEPOffsets();
622  MadeChange |= optimizePhiTypes(F);
623 
624  if (MadeChange)
625  eliminateFallThrough(F);
626 
627  // Really free removed instructions during promotion.
628  for (Instruction *I : RemovedInsts)
629  I->deleteValue();
630 
631  EverMadeChange |= MadeChange;
632  SeenChainsForSExt.clear();
633  ValToSExtendedUses.clear();
634  RemovedInsts.clear();
635  LargeOffsetGEPMap.clear();
636  LargeOffsetGEPID.clear();
637  }
638 
639  NewGEPBases.clear();
640  SunkAddrs.clear();
641 
642  if (!DisableBranchOpts) {
643  MadeChange = false;
644  // Use a set vector to get deterministic iteration order. The order the
645  // blocks are removed may affect whether or not PHI nodes in successors
646  // are removed.
647    SmallSetVector<BasicBlock *, 8> WorkList;
648    for (BasicBlock &BB : F) {
649      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
650      MadeChange |= ConstantFoldTerminator(&BB, true);
651  if (!MadeChange)
652  continue;
653 
654  for (BasicBlock *Succ : Successors)
655  if (pred_empty(Succ))
656  WorkList.insert(Succ);
657  }
658 
659  // Delete the dead blocks and any of their dead successors.
660  MadeChange |= !WorkList.empty();
661  while (!WorkList.empty()) {
662  BasicBlock *BB = WorkList.pop_back_val();
663      SmallVector<BasicBlock *, 2> Successors(successors(BB));
664 
665      DeleteDeadBlock(BB);
666 
667  for (BasicBlock *Succ : Successors)
668  if (pred_empty(Succ))
669  WorkList.insert(Succ);
670  }
671 
672  // Merge pairs of basic blocks with unconditional branches, connected by
673  // a single edge.
674  if (EverMadeChange || MadeChange)
675  MadeChange |= eliminateFallThrough(F);
676 
677  EverMadeChange |= MadeChange;
678  }
679 
680  if (!DisableGCOpts) {
681    SmallVector<GCStatepointInst *, 8> Statepoints;
682    for (BasicBlock &BB : F)
683  for (Instruction &I : BB)
684  if (auto *SP = dyn_cast<GCStatepointInst>(&I))
685  Statepoints.push_back(SP);
686  for (auto &I : Statepoints)
687  EverMadeChange |= simplifyOffsetableRelocate(*I);
688  }
689 
690  // Do this last to clean up use-before-def scenarios introduced by other
691  // preparatory transforms.
692  EverMadeChange |= placeDbgValues(F);
693  EverMadeChange |= placePseudoProbes(F);
694 
695 #ifndef NDEBUG
696  if (VerifyBFIUpdates)
697  verifyBFIUpdates(F);
698 #endif
699 
700  return EverMadeChange;
701 }
702 
703 bool CodeGenPrepare::eliminateAssumptions(Function &F) {
704  bool MadeChange = false;
705  for (BasicBlock &BB : F) {
706  CurInstIterator = BB.begin();
707  while (CurInstIterator != BB.end()) {
708  Instruction *I = &*(CurInstIterator++);
709  if (auto *Assume = dyn_cast<AssumeInst>(I)) {
710  MadeChange = true;
711  Value *Operand = Assume->getOperand(0);
712  Assume->eraseFromParent();
713 
714  resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
715  RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
716  });
717  }
718  }
719  }
720  return MadeChange;
721 }
722 
723 /// An instruction is about to be deleted, so remove all references to it in our
724 /// GEP-tracking data structures.
725 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
726  LargeOffsetGEPMap.erase(V);
727  NewGEPBases.erase(V);
728 
729  auto GEP = dyn_cast<GetElementPtrInst>(V);
730  if (!GEP)
731  return;
732 
733  LargeOffsetGEPID.erase(GEP);
734 
735  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
736  if (VecI == LargeOffsetGEPMap.end())
737  return;
738 
739  auto &GEPVector = VecI->second;
740  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
741 
742  if (GEPVector.empty())
743  LargeOffsetGEPMap.erase(VecI);
744 }
745 
746 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
747 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
748  DominatorTree NewDT(F);
749  LoopInfo NewLI(NewDT);
750  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
751  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
752  NewBFI.verifyMatch(*BFI);
753 }
754 
755 /// Merge basic blocks which are connected by a single edge, where one of the
756 /// basic blocks has a single successor pointing to the other basic block,
757 /// which has a single predecessor.
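// For example, if %bb1 ends in an unconditional "br label %bb2" and %bb2 has
// %bb1 as its only predecessor, %bb2 is folded into %bb1 and removed.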
758 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
759  bool Changed = false;
760  // Scan all of the blocks in the function, except for the entry block.
761  // Use a temporary array to avoid iterator being invalidated when
762  // deleting blocks.
763  SmallVector<WeakTrackingVH, 16> Blocks;
764  for (auto &Block : llvm::drop_begin(F))
765  Blocks.push_back(&Block);
766 
767  SmallSet<WeakTrackingVH, 16> Preds;
768  for (auto &Block : Blocks) {
769  auto *BB = cast_or_null<BasicBlock>(Block);
770  if (!BB)
771  continue;
772  // If the destination block has a single pred, then this is a trivial
773  // edge, just collapse it.
774  BasicBlock *SinglePred = BB->getSinglePredecessor();
775 
776  // Don't merge if BB's address is taken.
777  if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
778  continue;
779 
780  BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
781  if (Term && !Term->isConditional()) {
782  Changed = true;
783  LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
784 
785  // Merge BB into SinglePred and delete it.
786      MergeBlockIntoPredecessor(BB);
787      Preds.insert(SinglePred);
788 
789  if (IsHugeFunc) {
790  // Update FreshBBs to optimize the merged BB.
791  FreshBBs.insert(SinglePred);
792  FreshBBs.erase(BB);
793  }
794  }
795  }
796 
797  // (Repeatedly) merging blocks into their predecessors can create redundant
798  // debug intrinsics.
799  for (const auto &Pred : Preds)
800  if (auto *BB = cast_or_null<BasicBlock>(Pred))
801      RemoveRedundantDbgInstrs(BB);
802 
803  return Changed;
804 }
805 
806 /// Find a destination block from BB if BB is mergeable empty block.
807 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
808  // If this block doesn't end with an uncond branch, ignore it.
809  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
810  if (!BI || !BI->isUnconditional())
811  return nullptr;
812 
813  // If the instruction before the branch (skipping debug info) isn't a phi
814  // node, then other stuff is happening here.
815  BasicBlock::iterator BBI = BI->getIterator();
816  if (BBI != BB->begin()) {
817  --BBI;
818  while (isa<DbgInfoIntrinsic>(BBI)) {
819  if (BBI == BB->begin())
820  break;
821  --BBI;
822  }
823  if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
824  return nullptr;
825  }
826 
827  // Do not break infinite loops.
828  BasicBlock *DestBB = BI->getSuccessor(0);
829  if (DestBB == BB)
830  return nullptr;
831 
832  if (!canMergeBlocks(BB, DestBB))
833  DestBB = nullptr;
834 
835  return DestBB;
836 }
837 
838 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
839 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
840 /// edges in ways that are non-optimal for isel. Start by eliminating these
841 /// blocks so we can split them the way we want them.
842 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
843  SmallPtrSet<BasicBlock *, 16> Preheaders;
844  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
845  while (!LoopList.empty()) {
846  Loop *L = LoopList.pop_back_val();
847  llvm::append_range(LoopList, *L);
848  if (BasicBlock *Preheader = L->getLoopPreheader())
849  Preheaders.insert(Preheader);
850  }
851 
852  bool MadeChange = false;
853  // Copy blocks into a temporary array to avoid iterator invalidation issues
854  // as we remove them.
855  // Note that this intentionally skips the entry block.
856  SmallVector<WeakTrackingVH, 16> Blocks;
857  for (auto &Block : llvm::drop_begin(F))
858  Blocks.push_back(&Block);
859 
860  for (auto &Block : Blocks) {
861  BasicBlock *BB = cast_or_null<BasicBlock>(Block);
862  if (!BB)
863  continue;
864  BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
865  if (!DestBB ||
866  !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
867  continue;
868 
869  eliminateMostlyEmptyBlock(BB);
870  MadeChange = true;
871  }
872  return MadeChange;
873 }
874 
875 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
876  BasicBlock *DestBB,
877  bool isPreheader) {
878  // Do not delete loop preheaders if doing so would create a critical edge.
879  // Loop preheaders can be good locations to spill registers. If the
880  // preheader is deleted and we create a critical edge, registers may be
881  // spilled in the loop body instead.
882  if (!DisablePreheaderProtect && isPreheader &&
883  !(BB->getSinglePredecessor() &&
884  BB->getSinglePredecessor()->getSingleSuccessor()))
885  return false;
886 
887  // Skip merging if the block's successor is also a successor to any callbr
888  // that leads to this block.
889  // FIXME: Is this really needed? Is this a correctness issue?
890  for (BasicBlock *Pred : predecessors(BB)) {
891  if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
892  for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
893  if (DestBB == CBI->getSuccessor(i))
894  return false;
895  }
896 
897  // Try to skip merging if the unique predecessor of BB is terminated by a
898  // switch or indirect branch instruction, and BB is used as an incoming block
899  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
900  // add COPY instructions in the predecessor of BB instead of BB (if it is not
901  // merged). Note that the critical edge created by merging such blocks won't be
902  // split in MachineSink because the jump table is not analyzable. By keeping
903  // such empty block (BB), ISel will place COPY instructions in BB, not in the
904  // predecessor of BB.
905  BasicBlock *Pred = BB->getUniquePredecessor();
906  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
907  isa<IndirectBrInst>(Pred->getTerminator())))
908  return true;
909 
910  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
911  return true;
912 
913  // We use a simple cost heuristic: skipping the merge is considered
914  // profitable if the cost of skipping merging is less than the cost of
915  // merging : Cost(skipping merging) < Cost(merging BB), where the
916  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
917  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
918  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
919  // Freq(Pred) / Freq(BB) > 2.
920  // Note that if there are multiple empty blocks sharing the same incoming
921  // value for the PHIs in the DestBB, we consider them together. In such
922  // case, Cost(merging BB) will be the sum of their frequencies.
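 // For example, with Freq(Pred) = 100, Freq(BB) = 10 and unit copy/branch
 // costs, Cost(skipping merging) = 10 * 2 = 20 while Cost(merging BB) =
 // 100 * 1 = 100, so skipping the merge is the cheaper choice and we
 // return false below.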
923 
924  if (!isa<PHINode>(DestBB->begin()))
925  return true;
926 
927  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
928 
929  // Find all other incoming blocks from which incoming values of all PHIs in
930  // DestBB are the same as the ones from BB.
931  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
932  if (DestBBPred == BB)
933  continue;
934 
935  if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
936  return DestPN.getIncomingValueForBlock(BB) ==
937  DestPN.getIncomingValueForBlock(DestBBPred);
938  }))
939  SameIncomingValueBBs.insert(DestBBPred);
940  }
941 
942  // See if all of BB's incoming values are the same as the value from Pred. In
943  // this case, there is no reason to skip merging because COPYs are expected
944  // to be placed in Pred already.
945  if (SameIncomingValueBBs.count(Pred))
946  return true;
947 
948  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
949  BlockFrequency BBFreq = BFI->getBlockFreq(BB);
950 
951  for (auto *SameValueBB : SameIncomingValueBBs)
952  if (SameValueBB->getUniquePredecessor() == Pred &&
953  DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
954  BBFreq += BFI->getBlockFreq(SameValueBB);
955 
956  return PredFreq.getFrequency() <=
957         BBFreq.getFrequency() * FreqRatioToSkipMerge;
958 }
959 
960 /// Return true if we can merge BB into DestBB if there is a single
961 /// unconditional branch between them, and BB contains no other non-phi
962 /// instructions.
963 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
964  const BasicBlock *DestBB) const {
965  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
966  // the successor. If there are more complex conditions (e.g. preheaders),
967  // don't mess around with them.
968  for (const PHINode &PN : BB->phis()) {
969  for (const User *U : PN.users()) {
970  const Instruction *UI = cast<Instruction>(U);
971  if (UI->getParent() != DestBB || !isa<PHINode>(UI))
972  return false;
973  // If User is inside DestBB block and it is a PHINode then check
974  // incoming value. If incoming value is not from BB then this is
975  // a complex condition (e.g. preheaders) we want to avoid here.
976  if (UI->getParent() == DestBB) {
977  if (const PHINode *UPN = dyn_cast<PHINode>(UI))
978  for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
979  Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
980  if (Insn && Insn->getParent() == BB &&
981  Insn->getParent() != UPN->getIncomingBlock(I))
982  return false;
983  }
984  }
985  }
986  }
987 
988  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
989  // and DestBB may have conflicting incoming values for the block. If so, we
990  // can't merge the block.
991  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
992  if (!DestBBPN)
993  return true; // no conflict.
994 
995  // Collect the preds of BB.
996  SmallPtrSet<const BasicBlock *, 16> BBPreds;
997  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
998  // It is faster to get preds from a PHI than with pred_iterator.
999  for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1000  BBPreds.insert(BBPN->getIncomingBlock(i));
1001  } else {
1002  BBPreds.insert(pred_begin(BB), pred_end(BB));
1003  }
1004 
1005  // Walk the preds of DestBB.
1006  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1007  BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1008  if (BBPreds.count(Pred)) { // Common predecessor?
1009  for (const PHINode &PN : DestBB->phis()) {
1010  const Value *V1 = PN.getIncomingValueForBlock(Pred);
1011  const Value *V2 = PN.getIncomingValueForBlock(BB);
1012 
1013  // If V2 is a phi node in BB, look up what the mapped value will be.
1014  if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1015  if (V2PN->getParent() == BB)
1016  V2 = V2PN->getIncomingValueForBlock(Pred);
1017 
1018  // If there is a conflict, bail out.
1019  if (V1 != V2)
1020  return false;
1021  }
1022  }
1023  }
1024 
1025  return true;
1026 }
1027 
1028 /// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
1029 static void replaceAllUsesWith(Value *Old, Value *New,
1030  SmallSet<BasicBlock *, 32> &FreshBBs,
1031  bool IsHuge) {
1032  auto *OldI = dyn_cast<Instruction>(Old);
1033  if (OldI) {
1034  for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
1035  UI != E; ++UI) {
1036  Instruction *User = cast<Instruction>(*UI);
1037  if (IsHuge)
1038  FreshBBs.insert(User->getParent());
1039  }
1040  }
1041  Old->replaceAllUsesWith(New);
1042 }
1043 
1044 /// Eliminate a basic block that has only phi's and an unconditional branch in
1045 /// it.
1046 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1047  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1048  BasicBlock *DestBB = BI->getSuccessor(0);
1049 
1050  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
1051  << *BB << *DestBB);
1052 
1053  // If the destination block has a single pred, then this is a trivial edge,
1054  // just collapse it.
1055  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1056  if (SinglePred != DestBB) {
1057  assert(SinglePred == BB &&
1058  "Single predecessor not the same as predecessor");
1059  // Merge DestBB into SinglePred/BB and delete it.
1060  MergeBlockIntoPredecessor(DestBB);
1061  // Note: BB(=SinglePred) will not be deleted on this path.
1062  // DestBB(=its single successor) is the one that was deleted.
1063  LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
1064 
1065  if (IsHugeFunc) {
1066  // Update FreshBBs to optimize the merged BB.
1067  FreshBBs.insert(SinglePred);
1068  FreshBBs.erase(DestBB);
1069  }
1070  return;
1071  }
1072  }
1073 
1074  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
1075  // to handle the new incoming edges it is about to have.
1076  for (PHINode &PN : DestBB->phis()) {
1077  // Remove the incoming value for BB, and remember it.
1078  Value *InVal = PN.removeIncomingValue(BB, false);
1079 
1080  // Two options: either the InVal is a phi node defined in BB or it is some
1081  // value that dominates BB.
1082  PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1083  if (InValPhi && InValPhi->getParent() == BB) {
1084  // Add all of the input values of the input PHI as inputs of this phi.
1085  for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1086  PN.addIncoming(InValPhi->getIncomingValue(i),
1087  InValPhi->getIncomingBlock(i));
1088  } else {
1089  // Otherwise, add one instance of the dominating value for each edge that
1090  // we will be adding.
1091  if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1092  for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1093  PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1094  } else {
1095  for (BasicBlock *Pred : predecessors(BB))
1096  PN.addIncoming(InVal, Pred);
1097  }
1098  }
1099  }
1100 
1101  // The PHIs are now updated, change everything that refers to BB to use
1102  // DestBB and remove BB.
1103  BB->replaceAllUsesWith(DestBB);
1104  BB->eraseFromParent();
1105  ++NumBlocksElim;
1106 
1107  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1108 }
1109 
1110 // Computes a map of base pointer relocation instructions to corresponding
1111 // derived pointer relocation instructions given a vector of all relocate calls
1112 static void computeBaseDerivedRelocateMap(
1113     const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1114     DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1115         &RelocateInstMap) {
1116  // Collect information in two maps: one primarily for locating the base object
1117  // while filling the second map; the second map is the final structure holding
1118  // a mapping between Base and corresponding Derived relocate calls
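// For example, given %b = relocate(%tok, i32 4, i32 4) and
// %d = relocate(%tok, i32 4, i32 5), the first is the base relocation
// (base index == derived index) and the resulting map is { %b -> [%d] }.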
1119  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1120  for (auto *ThisRelocate : AllRelocateCalls) {
1121  auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1122  ThisRelocate->getDerivedPtrIndex());
1123  RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1124  }
1125  for (auto &Item : RelocateIdxMap) {
1126  std::pair<unsigned, unsigned> Key = Item.first;
1127  if (Key.first == Key.second)
1128  // Base relocation: nothing to insert
1129  continue;
1130 
1131  GCRelocateInst *I = Item.second;
1132  auto BaseKey = std::make_pair(Key.first, Key.first);
1133 
1134  // We're iterating over RelocateIdxMap so we cannot modify it.
1135  auto MaybeBase = RelocateIdxMap.find(BaseKey);
1136  if (MaybeBase == RelocateIdxMap.end())
1137  // TODO: We might want to insert a new base object relocate and gep off
1138  // that, if there are enough derived object relocates.
1139  continue;
1140 
1141  RelocateInstMap[MaybeBase->second].push_back(I);
1142  }
1143 }
1144 
1145 // Accepts a GEP and extracts the operands into a vector provided they're all
1146 // small integer constants
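// For example, "getelementptr %struct.S, ptr %base, i32 0, i32 3" yields
// OffsetV = {0, 3}; a non-constant index or a constant larger than 20 makes
// this return false.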
1147 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1148                                           SmallVectorImpl<Value *> &OffsetV) {
1149  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1150  // Only accept small constant integer operands
1151  auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1152  if (!Op || Op->getZExtValue() > 20)
1153  return false;
1154  }
1155 
1156  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1157  OffsetV.push_back(GEP->getOperand(i));
1158  return true;
1159 }
1160 
1161 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1162 // replace, computes a replacement, and applies it.
1163 static bool
1164 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1165                           const SmallVectorImpl<GCRelocateInst *> &Targets) {
1166  bool MadeChange = false;
1167  // We must ensure that the relocation of a derived pointer is defined after
1168  // the relocation of its base pointer. If we find a relocation that uses this
1169  // base but is defined earlier than the base's relocation, we move the base
1170  // relocation right before the found relocation. We consider only relocations
1171  // in the same basic block as the base's relocation; relocations from other
1172  // basic blocks are skipped by this optimization and we do not care about them.
1173  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1174  &*R != RelocatedBase; ++R)
1175  if (auto *RI = dyn_cast<GCRelocateInst>(R))
1176  if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1177  if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1178  RelocatedBase->moveBefore(RI);
1179  break;
1180  }
1181 
1182  for (GCRelocateInst *ToReplace : Targets) {
1183  assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1184  "Not relocating a derived object of the original base object");
1185  if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1186  // A duplicate relocate call. TODO: coalesce duplicates.
1187  continue;
1188  }
1189 
1190  if (RelocatedBase->getParent() != ToReplace->getParent()) {
1191  // Base and derived relocates are in different basic blocks.
1192  // In this case transform is only valid when base dominates derived
1193  // relocate. However it would be too expensive to check dominance
1194  // for each such relocate, so we skip the whole transformation.
1195  continue;
1196  }
1197 
1198  Value *Base = ToReplace->getBasePtr();
1199  auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1200  if (!Derived || Derived->getPointerOperand() != Base)
1201  continue;
1202 
1203  SmallVector<Value *, 2> OffsetV;
1204  if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1205  continue;
1206 
1207  // Create a Builder and replace the target callsite with a gep
1208  assert(RelocatedBase->getNextNode() &&
1209  "Should always have one since it's not a terminator");
1210 
1211  // Insert after RelocatedBase
1212  IRBuilder<> Builder(RelocatedBase->getNextNode());
1213  Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1214 
1215  // If gc_relocate does not match the actual type, cast it to the right type.
1216  // In theory, there must be a bitcast after gc_relocate if the type does not
1217  // match, and we should reuse it to get the derived pointer. But it could be
1218  // cases like this:
1219  // bb1:
1220  // ...
1221  // %g1 = call coldcc i8 addrspace(1)*
1222  // @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1223  //
1224  // bb2:
1225  // ...
1226  // %g2 = call coldcc i8 addrspace(1)*
1227  // @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1228  //
1229  // merge:
1230  // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1231  // %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)*
1232  //
1233  // In this case, we can not find the bitcast any more. So we insert a new
1234  // bitcast whether there is already one or not. In this way, we can handle
1235  // all cases, and the extra bitcast should be optimized away in later
1236  // passes.
1237  Value *ActualRelocatedBase = RelocatedBase;
1238  if (RelocatedBase->getType() != Base->getType()) {
1239  ActualRelocatedBase =
1240  Builder.CreateBitCast(RelocatedBase, Base->getType());
1241  }
1242  Value *Replacement =
1243  Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1244  makeArrayRef(OffsetV));
1245  Replacement->takeName(ToReplace);
1246  // If the newly generated derived pointer's type does not match the original
1247  // derived pointer's type, cast the new derived pointer to match it. Same
1248  // reasoning as above.
1249  Value *ActualReplacement = Replacement;
1250  if (Replacement->getType() != ToReplace->getType()) {
1251  ActualReplacement =
1252  Builder.CreateBitCast(Replacement, ToReplace->getType());
1253  }
1254  ToReplace->replaceAllUsesWith(ActualReplacement);
1255  ToReplace->eraseFromParent();
1256 
1257  MadeChange = true;
1258  }
1259  return MadeChange;
1260 }
1261 
1262 // Turns this:
1263 //
1264 // %base = ...
1265 // %ptr = gep %base + 15
1266 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1267 // %base' = relocate(%tok, i32 4, i32 4)
1268 // %ptr' = relocate(%tok, i32 4, i32 5)
1269 // %val = load %ptr'
1270 //
1271 // into this:
1272 //
1273 // %base = ...
1274 // %ptr = gep %base + 15
1275 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1276 // %base' = gc.relocate(%tok, i32 4, i32 4)
1277 // %ptr' = gep %base' + 15
1278 // %val = load %ptr'
1279 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1280  bool MadeChange = false;
1281  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1282  for (auto *U : I.users())
1283  if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1284  // Collect all the relocate calls associated with a statepoint
1285  AllRelocateCalls.push_back(Relocate);
1286 
1287  // We need at least one base pointer relocation + one derived pointer
1288  // relocation to mangle
1289  if (AllRelocateCalls.size() < 2)
1290  return false;
1291 
1292  // RelocateInstMap is a mapping from the base relocate instruction to the
1293  // corresponding derived relocate instructions
1295  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1296  if (RelocateInstMap.empty())
1297  return false;
1298 
1299  for (auto &Item : RelocateInstMap)
1300  // Item.first is the RelocatedBase to offset against
1301  // Item.second is the vector of Targets to replace
1302  MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1303  return MadeChange;
1304 }
1305 
1306 /// Sink the specified cast instruction into its user blocks.
1307 static bool SinkCast(CastInst *CI) {
1308  BasicBlock *DefBB = CI->getParent();
1309 
1310  /// InsertedCasts - Only insert a cast in each block once.
1311  DenseMap<BasicBlock *, CastInst *> InsertedCasts;
1312 
1313  bool MadeChange = false;
1314  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1315  UI != E;) {
1316  Use &TheUse = UI.getUse();
1317  Instruction *User = cast<Instruction>(*UI);
1318 
1319  // Figure out which BB this cast is used in. For PHI's this is the
1320  // appropriate predecessor block.
1321  BasicBlock *UserBB = User->getParent();
1322  if (PHINode *PN = dyn_cast<PHINode>(User)) {
1323  UserBB = PN->getIncomingBlock(TheUse);
1324  }
1325 
1326  // Preincrement use iterator so we don't invalidate it.
1327  ++UI;
1328 
1329  // The first insertion point of a block containing an EH pad is after the
1330  // pad. If the pad is the user, we cannot sink the cast past the pad.
1331  if (User->isEHPad())
1332  continue;
1333 
1334  // If the block selected to receive the cast is an EH pad that does not
1335  // allow non-PHI instructions before the terminator, we can't sink the
1336  // cast.
1337  if (UserBB->getTerminator()->isEHPad())
1338  continue;
1339 
1340  // If this user is in the same block as the cast, don't change the cast.
1341  if (UserBB == DefBB)
1342  continue;
1343 
1344  // If we have already inserted a cast into this block, use it.
1345  CastInst *&InsertedCast = InsertedCasts[UserBB];
1346 
1347  if (!InsertedCast) {
1348  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1349  assert(InsertPt != UserBB->end());
1350  InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1351  CI->getType(), "", &*InsertPt);
1352  InsertedCast->setDebugLoc(CI->getDebugLoc());
1353  }
1354 
1355  // Replace a use of the cast with a use of the new cast.
1356  TheUse = InsertedCast;
1357  MadeChange = true;
1358  ++NumCastUses;
1359  }
1360 
1361  // If we removed all uses, nuke the cast.
1362  if (CI->use_empty()) {
1363  salvageDebugInfo(*CI);
1364  CI->eraseFromParent();
1365  MadeChange = true;
1366  }
1367 
1368  return MadeChange;
1369 }
1370 
1371 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1372 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1373 /// reduce the number of virtual registers that must be created and coalesced.
1374 ///
1375 /// Return true if any changes are made.
1376 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1377                                        const DataLayout &DL) {
1378  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1379  // than sinking only nop casts, but is helpful on some platforms.
1380  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1381  if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1382  ASC->getDestAddressSpace()))
1383  return false;
1384  }
1385 
1386  // If this is a noop copy,
1387  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1388  EVT DstVT = TLI.getValueType(DL, CI->getType());
1389 
1390  // This is an fp<->int conversion?
1391  if (SrcVT.isInteger() != DstVT.isInteger())
1392  return false;
1393 
1394  // If this is an extension, it will be a zero or sign extension, which
1395  // isn't a noop.
1396  if (SrcVT.bitsLT(DstVT))
1397  return false;
1398 
1399  // If these values will be promoted, find out what they will be promoted
1400  // to. This helps us consider truncates on PPC as noop copies when they
1401  // are.
1402  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1403      TargetLowering::TypePromoteInteger)
1404    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1405  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1406      TargetLowering::TypePromoteInteger)
1407    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1408 
1409  // If, after promotion, these are the same types, this is a noop copy.
1410  if (SrcVT != DstVT)
1411  return false;
1412 
1413  return SinkCast(CI);
1414 }
1415 
1416 // Match a simple increment by constant operation. Note that if a sub is
1417 // matched, the step is negated (as if the step had been canonicalized to
1418 // an add, even though we leave the instruction alone.)
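// For example, "%iv.next = add i32 %iv, 4" matches with LHS = %iv and Step = 4,
// while "%iv.next = sub i32 %iv, 4" matches with LHS = %iv and Step = -4.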
1419 static bool matchIncrement(const Value *IVInc, Instruction *&LHS,
1420                            Constant *&Step) {
1421  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1422  match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1423  m_Instruction(LHS), m_Constant(Step)))))
1424  return true;
1425  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1426  match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1427  m_Instruction(LHS), m_Constant(Step))))) {
1428  Step = ConstantExpr::getNeg(Step);
1429  return true;
1430  }
1431  return false;
1432 }
1433 
1434 /// If given \p PN is an inductive variable with value IVInc coming from the
1435 /// backedge, and on each iteration it gets increased by Step, return pair
1436 /// <IVInc, Step>. Otherwise, return None.
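/// For example, for the loop header phi
///   %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
/// with "%iv.next = add i32 %iv, 1" in the latch, this returns <%iv.next, 1>.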
1437 static std::optional<std::pair<Instruction *, Constant *>>
1438 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1439  const Loop *L = LI->getLoopFor(PN->getParent());
1440  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1441  return None;
1442  auto *IVInc =
1443  dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1444  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1445  return None;
1446  Instruction *LHS = nullptr;
1447  Constant *Step = nullptr;
1448  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1449  return std::make_pair(IVInc, Step);
1450  return None;
1451 }
1452 
1453 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1454  auto *I = dyn_cast<Instruction>(V);
1455  if (!I)
1456  return false;
1457  Instruction *LHS = nullptr;
1458  Constant *Step = nullptr;
1459  if (!matchIncrement(I, LHS, Step))
1460  return false;
1461  if (auto *PN = dyn_cast<PHINode>(LHS))
1462  if (auto IVInc = getIVIncrement(PN, LI))
1463  return IVInc->first == I;
1464  return false;
1465 }
1466 
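/// Fold a math/compare pair (e.g. an add and its unsigned-overflow check) into
/// a single call to the overflow intrinsic \p IID, inserting the call at the
/// earlier of the two instructions and rewriting both to use its results.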
1467 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1468  Value *Arg0, Value *Arg1,
1469  CmpInst *Cmp,
1470  Intrinsic::ID IID) {
1471  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1472  if (!isIVIncrement(BO, LI))
1473  return false;
1474  const Loop *L = LI->getLoopFor(BO->getParent());
1475  assert(L && "L should not be null after isIVIncrement()");
1476  // Do not risk moving the increment into a child loop.
1477  if (LI->getLoopFor(Cmp->getParent()) != L)
1478  return false;
1479 
1480  // Finally, we need to ensure that the insert point will dominate all
1481  // existing uses of the increment.
1482 
1483  auto &DT = getDT(*BO->getParent()->getParent());
1484  if (DT.dominates(Cmp->getParent(), BO->getParent()))
1485  // If we're moving up the dom tree, all uses are trivially dominated.
1486  // (This is the common case for code produced by LSR.)
1487  return true;
1488 
1489  // Otherwise, special case the single use in the phi recurrence.
1490  return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1491  };
1492  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1493  // We used to use a dominator tree here to allow multi-block optimization.
1494  // But that was problematic because:
1495  // 1. It could cause a perf regression by hoisting the math op into the
1496  // critical path.
1497  // 2. It could cause a perf regression by creating a value that was live
1498  // across multiple blocks and increasing register pressure.
1499  // 3. Use of a dominator tree could cause large compile-time regression.
1500  // This is because we recompute the DT on every change in the main CGP
1501  // run-loop. The recomputing is probably unnecessary in many cases, so if
1502  // that was fixed, using a DT here would be ok.
1503  //
1504  // There is one important particular case we still want to handle: if BO is
1505  // the IV increment. Important properties that make it profitable:
1506  // - We can speculate IV increment anywhere in the loop (as long as the
1507  // indvar Phi is its only user);
1508  // - Upon computing Cmp, we effectively compute something equivalent to the
1509  //   IV increment (even though it is spelled differently in the IR). So moving it up
1510  // to the cmp point does not really increase register pressure.
1511  return false;
1512  }
1513 
1514  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1515  if (BO->getOpcode() == Instruction::Add &&
1516  IID == Intrinsic::usub_with_overflow) {
1517  assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1518  Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1519  }
1520 
1521  // Insert at the first instruction of the pair.
1522  Instruction *InsertPt = nullptr;
1523  for (Instruction &Iter : *Cmp->getParent()) {
1524  // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1525  // the overflow intrinsic are defined.
1526  if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1527  InsertPt = &Iter;
1528  break;
1529  }
1530  }
1531  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1532 
1533  IRBuilder<> Builder(InsertPt);
1534  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1535  if (BO->getOpcode() != Instruction::Xor) {
1536  Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1537  replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1538  } else
1539  assert(BO->hasOneUse() &&
1540  "Patterns with XOr should use the BO only in the compare");
1541  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1542  replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1543  Cmp->eraseFromParent();
1544  BO->eraseFromParent();
1545  return true;
1546 }
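// Illustrative rewrite performed above for the uaddo case (a sketch, assuming
// the target accepts the overflow intrinsic):
//   %add = add i64 %a, %b
//   %cmp = icmp ult i64 %add, %a
// becomes
//   %ov.pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
//   %math    = extractvalue { i64, i1 } %ov.pair, 0
//   %ov      = extractvalue { i64, i1 } %ov.pair, 1
// with uses of %add replaced by %math and uses of %cmp replaced by %ov.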
1547 
1548 /// Match special-case patterns that check for unsigned add overflow.
1549 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1550  BinaryOperator *&Add) {
1551  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1552  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1553  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1554 
1555  // We are not expecting non-canonical/degenerate code. Just bail out.
1556  if (isa<Constant>(A))
1557  return false;
1558 
1559  ICmpInst::Predicate Pred = Cmp->getPredicate();
1560  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1561  B = ConstantInt::get(B->getType(), 1);
1562  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1563  B = ConstantInt::get(B->getType(), -1);
1564  else
1565  return false;
1566 
1567  // Check the users of the variable operand of the compare looking for an add
1568  // with the adjusted constant.
1569  for (User *U : A->users()) {
1570  if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1571  Add = cast<BinaryOperator>(U);
1572  return true;
1573  }
1574  }
1575  return false;
1576 }
1577 
1578 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1579 /// intrinsic. Return true if any changes were made.
1580 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1581  ModifyDT &ModifiedDT) {
1582  Value *A, *B;
1583  BinaryOperator *Add;
1584  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1585  if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1586  return false;
1587  // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1588  A = Add->getOperand(0);
1589  B = Add->getOperand(1);
1590  }
1591 
1592  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1593  TLI->getValueType(*DL, Add->getType()),
1594  Add->hasNUsesOrMore(2)))
1595  return false;
1596 
1597  // We don't want to move around uses of condition values this late, so we
1598  // check if it is legal to create the call to the intrinsic in the basic
1599  // block containing the icmp.
1600  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1601  return false;
1602 
1603  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1604  Intrinsic::uadd_with_overflow))
1605  return false;
1606 
1607  // Reset callers - do not crash by iterating over a dead instruction.
1608  ModifiedDT = ModifyDT::ModifyInstDT;
1609  return true;
1610 }
1611 
1612 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1613  ModifyDT &ModifiedDT) {
1614  // We are not expecting non-canonical/degenerate code. Just bail out.
1615  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1616  if (isa<Constant>(A) && isa<Constant>(B))
1617  return false;
1618 
1619  // Convert (A u> B) to (A u< B) to simplify pattern matching.
1620  ICmpInst::Predicate Pred = Cmp->getPredicate();
1621  if (Pred == ICmpInst::ICMP_UGT) {
1622  std::swap(A, B);
1623  Pred = ICmpInst::ICMP_ULT;
1624  }
1625  // Convert special-case: (A == 0) is the same as (A u< 1).
1626  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1627  B = ConstantInt::get(B->getType(), 1);
1628  Pred = ICmpInst::ICMP_ULT;
1629  }
1630  // Convert special-case: (A != 0) is the same as (0 u< A).
1631  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1632  std::swap(A, B);
1633  Pred = ICmpInst::ICMP_ULT;
1634  }
1635  if (Pred != ICmpInst::ICMP_ULT)
1636  return false;
1637 
1638  // Walk the users of a variable operand of a compare looking for a subtract or
1639  // add with that same operand. Also match the 2nd operand of the compare to
1640  // the add/sub, but that may be a negated constant operand of an add.
1641  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1642  BinaryOperator *Sub = nullptr;
1643  for (User *U : CmpVariableOperand->users()) {
1644  // A - B, A u< B --> usubo(A, B)
1645  if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1646  Sub = cast<BinaryOperator>(U);
1647  break;
1648  }
1649 
1650  // A + (-C), A u< C (canonicalized form of (sub A, C))
1651  const APInt *CmpC, *AddC;
1652  if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1653  match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1654  Sub = cast<BinaryOperator>(U);
1655  break;
1656  }
1657  }
1658  if (!Sub)
1659  return false;
1660 
1661  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1662  TLI->getValueType(*DL, Sub->getType()),
1663  Sub->hasNUsesOrMore(2)))
1664  return false;
1665 
1666  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1667  Cmp, Intrinsic::usub_with_overflow))
1668  return false;
1669 
1670  // Reset callers - do not crash by iterating over a dead instruction.
1671  ModifiedDT = ModifyDT::ModifyInstDT;
1672  return true;
1673 }
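// Illustrative rewrite (a sketch): for
//   %sub = sub i64 %a, %b
//   %cmp = icmp ult i64 %a, %b
// the code above emits @llvm.usub.with.overflow.i64(i64 %a, i64 %b) and replaces
// %sub and %cmp with the extracted result and overflow bit. The canonicalized
// form "%add = add i64 %a, -5" paired with "icmp ult i64 %a, 5" is handled the
// same way after negating the constant back in replaceMathCmpWithIntrinsic.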
1674 
1675 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1676 /// registers that must be created and coalesced. This is a clear win except on
1677 /// targets with multiple condition code registers (PowerPC), where it might
1678 /// lose; some adjustment may be wanted there.
1679 ///
1680 /// Return true if any changes are made.
1681 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1682  if (TLI.hasMultipleConditionRegisters())
1683  return false;
1684 
1685  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1686  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1687  return false;
1688 
1689  // Only insert a cmp in each block once.
1690  DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1691 
1692  bool MadeChange = false;
1693  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1694  UI != E;) {
1695  Use &TheUse = UI.getUse();
1696  Instruction *User = cast<Instruction>(*UI);
1697 
1698  // Preincrement use iterator so we don't invalidate it.
1699  ++UI;
1700 
1701  // Don't bother for PHI nodes.
1702  if (isa<PHINode>(User))
1703  continue;
1704 
1705  // Figure out which BB this cmp is used in.
1706  BasicBlock *UserBB = User->getParent();
1707  BasicBlock *DefBB = Cmp->getParent();
1708 
1709  // If this user is in the same block as the cmp, don't change the cmp.
1710  if (UserBB == DefBB)
1711  continue;
1712 
1713  // If we have already inserted a cmp into this block, use it.
1714  CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1715 
1716  if (!InsertedCmp) {
1717  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1718  assert(InsertPt != UserBB->end());
1719  InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1720  Cmp->getOperand(0), Cmp->getOperand(1), "",
1721  &*InsertPt);
1722  // Propagate the debug info.
1723  InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1724  }
1725 
1726  // Replace a use of the cmp with a use of the new cmp.
1727  TheUse = InsertedCmp;
1728  MadeChange = true;
1729  ++NumCmpUses;
1730  }
1731 
1732  // If we removed all uses, nuke the cmp.
1733  if (Cmp->use_empty()) {
1734  Cmp->eraseFromParent();
1735  MadeChange = true;
1736  }
1737 
1738  return MadeChange;
1739 }
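// Illustrative effect (a sketch): a compare defined in one block but consumed
// in another, e.g.
//   bb0:
//     %c = icmp eq i32 %x, 0
//     br label %bb1
//   bb1:
//     br i1 %c, label %t, label %f
// gets a fresh copy of the icmp in each user block, so SelectionDAG can fold
// the compare into the branch without keeping an i1 live across blocks.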
1740 
1741 /// For pattern like:
1742 ///
1743 /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1744 /// ...
1745 /// DomBB:
1746 /// ...
1747 /// br DomCond, TrueBB, CmpBB
1748 /// CmpBB: (with DomBB being the single predecessor)
1749 /// ...
1750 /// Cmp = icmp eq CmpOp0, CmpOp1
1751 /// ...
1752 ///
1753 /// This would use two comparisons on targets where the lowering of icmp sgt/slt
1754 /// differs from the lowering of icmp eq (PowerPC). This function tries to convert
1755 /// 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'.
1756 /// After that, DomCond and Cmp can use the same comparison, saving one
1757 /// comparison.
1758 ///
1759 /// Return true if any changes are made.
1760 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1761  const TargetLowering &TLI) {
1762  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1763  return false;
1764 
1765  ICmpInst::Predicate Pred = Cmp->getPredicate();
1766  if (Pred != ICmpInst::ICMP_EQ)
1767  return false;
1768 
1769  // If icmp eq has users other than BranchInst and SelectInst, converting it to
1770  // icmp slt/sgt would introduce more redundant LLVM IR.
1771  for (User *U : Cmp->users()) {
1772  if (isa<BranchInst>(U))
1773  continue;
1774  if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1775  continue;
1776  return false;
1777  }
1778 
1779  // This is a cheap/incomplete check for dominance - just match a single
1780  // predecessor with a conditional branch.
1781  BasicBlock *CmpBB = Cmp->getParent();
1782  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1783  if (!DomBB)
1784  return false;
1785 
1786  // We want to ensure that the only way control gets to the comparison of
1787  // interest is that a less/greater than comparison on the same operands is
1788  // false.
1789  Value *DomCond;
1790  BasicBlock *TrueBB, *FalseBB;
1791  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1792  return false;
1793  if (CmpBB != FalseBB)
1794  return false;
1795 
1796  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1797  ICmpInst::Predicate DomPred;
1798  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1799  return false;
1800  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1801  return false;
1802 
1803  // Convert the equality comparison to the opposite of the dominating
1804  // comparison and swap the direction for all branch/select users.
1805  // We have conceptually converted:
1806  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1807  // to
1808  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
1809  // And similarly for branches.
1810  for (User *U : Cmp->users()) {
1811  if (auto *BI = dyn_cast<BranchInst>(U)) {
1812  assert(BI->isConditional() && "Must be conditional");
1813  BI->swapSuccessors();
1814  continue;
1815  }
1816  if (auto *SI = dyn_cast<SelectInst>(U)) {
1817  // Swap operands
1818  SI->swapValues();
1819  SI->swapProfMetadata();
1820  continue;
1821  }
1822  llvm_unreachable("Must be a branch or a select");
1823  }
1824  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1825  return true;
1826 }
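// Concretely (a sketch): with DomCond = "icmp slt i32 %a, %b" branching to
// (TrueBB, CmpBB), the equality compare in CmpBB
//   %cmp = icmp eq i32 %a, %b
// is rewritten above to
//   %cmp = icmp sgt i32 %a, %b
// and each branch/select user of %cmp has its successors/operands swapped, so
// both blocks end up reusing a single flavor of signed comparison.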
1827 
1828 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
1829  if (sinkCmpExpression(Cmp, *TLI))
1830  return true;
1831 
1832  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1833  return true;
1834 
1835  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1836  return true;
1837 
1838  if (foldICmpWithDominatingICmp(Cmp, *TLI))
1839  return true;
1840 
1841  return false;
1842 }
1843 
1844 /// Duplicate and sink the given 'and' instruction into user blocks where it is
1845 /// used in a compare to allow isel to generate better code for targets where
1846 /// this operation can be combined.
1847 ///
1848 /// Return true if any changes are made.
1849 static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
1850  SetOfInstrs &InsertedInsts) {
1851  // Double-check that we're not trying to optimize an instruction that was
1852  // already optimized by some other part of this pass.
1853  assert(!InsertedInsts.count(AndI) &&
1854  "Attempting to optimize already optimized and instruction");
1855  (void)InsertedInsts;
1856 
1857  // Nothing to do for single use in same basic block.
1858  if (AndI->hasOneUse() &&
1859  AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1860  return false;
1861 
1862  // Try to avoid cases where sinking/duplicating is likely to increase register
1863  // pressure.
1864  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1865  !isa<ConstantInt>(AndI->getOperand(1)) &&
1866  AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1867  return false;
1868 
1869  for (auto *U : AndI->users()) {
1870  Instruction *User = cast<Instruction>(U);
1871 
1872  // Only sink 'and' feeding icmp with 0.
1873  if (!isa<ICmpInst>(User))
1874  return false;
1875 
1876  auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1877  if (!CmpC || !CmpC->isZero())
1878  return false;
1879  }
1880 
1881  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1882  return false;
1883 
1884  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1885  LLVM_DEBUG(AndI->getParent()->dump());
1886 
1887  // Push the 'and' into the same block as the icmp 0. There should only be
1888  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1889  // others, so we don't need to keep track of which BBs we insert into.
1890  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1891  UI != E;) {
1892  Use &TheUse = UI.getUse();
1893  Instruction *User = cast<Instruction>(*UI);
1894 
1895  // Preincrement use iterator so we don't invalidate it.
1896  ++UI;
1897 
1898  LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1899 
1900  // Keep the 'and' in the same place if the use is already in the same block.
1901  Instruction *InsertPt =
1902  User->getParent() == AndI->getParent() ? AndI : User;
1903  Instruction *InsertedAnd =
1904  BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1905  AndI->getOperand(1), "", InsertPt);
1906  // Propagate the debug info.
1907  InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1908 
1909  // Replace a use of the 'and' with a use of the new 'and'.
1910  TheUse = InsertedAnd;
1911  ++NumAndUses;
1912  LLVM_DEBUG(User->getParent()->dump());
1913  }
1914 
1915  // We removed all uses, nuke the and.
1916  AndI->eraseFromParent();
1917  return true;
1918 }
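// Illustrative effect (a sketch): an 'and' with an immediate that is only ever
// tested against zero, e.g.
//   bb0:
//     %m = and i64 %x, 255
//     br label %bb1
//   bb1:
//     %c = icmp eq i64 %m, 0
// is re-created next to each icmp, so targets with a combined mask-and-compare
// (test-style) instruction can fold the pair during instruction selection.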
1919 
1920 /// Check if the candidates could be combined with a shift instruction, which
1921 /// includes:
1922 /// 1. Truncate instruction
1923 /// 2. And instruction where the immediate is a mask of the low bits:
1924 /// imm & (imm+1) == 0
1925 static bool isExtractBitsCandidateUse(Instruction *User) {
1926  if (!isa<TruncInst>(User)) {
1927  if (User->getOpcode() != Instruction::And ||
1928  !isa<ConstantInt>(User->getOperand(1)))
1929  return false;
1930 
1931  const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1932 
1933  if ((Cimm & (Cimm + 1)).getBoolValue())
1934  return false;
1935  }
1936  return true;
1937 }
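// For example, imm = 0x00ff qualifies as a low-bit mask since
// 0x00ff & 0x0100 == 0, whereas imm = 0x00f0 does not (0x00f0 & 0x00f1 != 0).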
1938 
1939 /// Sink both shift and truncate instruction to the use of truncate's BB.
1940 static bool
1942 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1943  const TargetLowering &TLI, const DataLayout &DL) {
1944  BasicBlock *UserBB = User->getParent();
1945  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1946  auto *TruncI = cast<TruncInst>(User);
1947  bool MadeChange = false;
1948 
1949  for (Value::user_iterator TruncUI = TruncI->user_begin(),
1950  TruncE = TruncI->user_end();
1951  TruncUI != TruncE;) {
1952 
1953  Use &TruncTheUse = TruncUI.getUse();
1954  Instruction *TruncUser = cast<Instruction>(*TruncUI);
1955  // Preincrement use iterator so we don't invalidate it.
1956 
1957  ++TruncUI;
1958 
1959  int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1960  if (!ISDOpcode)
1961  continue;
1962 
1963  // If the use is actually a legal node, there will not be an
1964  // implicit truncate.
1965  // FIXME: always querying the result type is just an
1966  // approximation; some nodes' legality is determined by the
1967  // operand or other means. There's no good way to find out though.
1968  if (TLI.isOperationLegalOrCustom(
1969  ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1970  continue;
1971 
1972  // Don't bother for PHI nodes.
1973  if (isa<PHINode>(TruncUser))
1974  continue;
1975 
1976  BasicBlock *TruncUserBB = TruncUser->getParent();
1977 
1978  if (UserBB == TruncUserBB)
1979  continue;
1980 
1981  BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1982  CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1983 
1984  if (!InsertedShift && !InsertedTrunc) {
1985  BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1986  assert(InsertPt != TruncUserBB->end());
1987  // Sink the shift
1988  if (ShiftI->getOpcode() == Instruction::AShr)
1989  InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1990  "", &*InsertPt);
1991  else
1992  InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1993  "", &*InsertPt);
1994  InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1995 
1996  // Sink the trunc
1997  BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1998  TruncInsertPt++;
1999  assert(TruncInsertPt != TruncUserBB->end());
2000 
2001  InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2002  TruncI->getType(), "", &*TruncInsertPt);
2003  InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2004 
2005  MadeChange = true;
2006 
2007  TruncTheUse = InsertedTrunc;
2008  }
2009  }
2010  return MadeChange;
2011 }
2012 
2013 /// Sink the shift *right* instruction into user blocks if the uses could
2014 /// potentially be combined with this shift instruction and generate BitExtract
2015 /// instruction. It will only be applied if the architecture supports BitExtract
2016 /// instruction. Here is an example:
2017 /// BB1:
2018 /// %x.extract.shift = lshr i64 %arg1, 32
2019 /// BB2:
2020 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16
2021 /// ==>
2022 ///
2023 /// BB2:
2024 /// %x.extract.shift.1 = lshr i64 %arg1, 32
2025 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2026 ///
2027 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
2028 /// instruction.
2029 /// Return true if any changes are made.
2031  const TargetLowering &TLI,
2032  const DataLayout &DL) {
2033  BasicBlock *DefBB = ShiftI->getParent();
2034 
2035  /// Only insert instructions in each block once.
2036  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2037 
2038  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2039 
2040  bool MadeChange = false;
2041  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2042  UI != E;) {
2043  Use &TheUse = UI.getUse();
2044  Instruction *User = cast<Instruction>(*UI);
2045  // Preincrement use iterator so we don't invalidate it.
2046  ++UI;
2047 
2048  // Don't bother for PHI nodes.
2049  if (isa<PHINode>(User))
2050  continue;
2051 
2052  if (!isExtractBitsCandidateUse(User))
2053  continue;
2054 
2055  BasicBlock *UserBB = User->getParent();
2056 
2057  if (UserBB == DefBB) {
2058  // If the shift and truncate instructions are in the same BB, the use of
2059  // the truncate (TruncUse) may still introduce another truncate if it is
2060  // not legal. In this case, we would like to sink both the shift and the
2061  // truncate instruction into the BB of TruncUse.
2062  // for example:
2063  // BB1:
2064  // i64 shift.result = lshr i64 opnd, imm
2065  // trunc.result = trunc shift.result to i16
2066  //
2067  // BB2:
2068  // ----> We will have an implicit truncate here if the architecture does
2069  // not have i16 compare.
2070  // cmp i16 trunc.result, opnd2
2071  //
2072  if (isa<TruncInst>(User) &&
2073  shiftIsLegal
2074  // If the type of the truncate is legal, no truncate will be
2075  // introduced in other basic blocks.
2076  && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2077  MadeChange =
2078  SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2079 
2080  continue;
2081  }
2082  // If we have already inserted a shift into this block, use it.
2083  BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2084 
2085  if (!InsertedShift) {
2086  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2087  assert(InsertPt != UserBB->end());
2088 
2089  if (ShiftI->getOpcode() == Instruction::AShr)
2090  InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
2091  "", &*InsertPt);
2092  else
2093  InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
2094  "", &*InsertPt);
2095  InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2096 
2097  MadeChange = true;
2098  }
2099 
2100  // Replace a use of the shift with a use of the new shift.
2101  TheUse = InsertedShift;
2102  }
2103 
2104  // If we removed all uses, or there are none, nuke the shift.
2105  if (ShiftI->use_empty()) {
2106  salvageDebugInfo(*ShiftI);
2107  ShiftI->eraseFromParent();
2108  MadeChange = true;
2109  }
2110 
2111  return MadeChange;
2112 }
2113 
2114 /// If counting leading or trailing zeros is an expensive operation and a zero
2115 /// input is defined, add a check for zero to avoid calling the intrinsic.
2116 ///
2117 /// We want to transform:
2118 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2119 ///
2120 /// into:
2121 /// entry:
2122 /// %cmpz = icmp eq i64 %A, 0
2123 /// br i1 %cmpz, label %cond.end, label %cond.false
2124 /// cond.false:
2125 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2126 /// br label %cond.end
2127 /// cond.end:
2128 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2129 ///
2130 /// If the transform is performed, return true and set ModifiedDT to true.
2131 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2132  const TargetLowering *TLI,
2133  const DataLayout *DL, ModifyDT &ModifiedDT,
2134  SmallSet<BasicBlock *, 32> &FreshBBs,
2135  bool IsHugeFunc) {
2136  // If a zero input is undefined, it doesn't make sense to despeculate that.
2137  if (match(CountZeros->getOperand(1), m_One()))
2138  return false;
2139 
2140  // If it's cheap to speculate, there's nothing to do.
2141  Type *Ty = CountZeros->getType();
2142  auto IntrinsicID = CountZeros->getIntrinsicID();
2143  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2144  (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2145  return false;
2146 
2147  // Only handle legal scalar cases. Anything else requires too much work.
2148  unsigned SizeInBits = Ty->getScalarSizeInBits();
2149  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2150  return false;
2151 
2152  // Bail if the value is never zero.
2153  Use &Op = CountZeros->getOperandUse(0);
2154  if (isKnownNonZero(Op, *DL))
2155  return false;
2156 
2157  // The intrinsic will be sunk behind a compare against zero and branch.
2158  BasicBlock *StartBlock = CountZeros->getParent();
2159  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2160  if (IsHugeFunc)
2161  FreshBBs.insert(CallBlock);
2162 
2163  // Create another block after the count zero intrinsic. A PHI will be added
2164  // in this block to select the result of the intrinsic or the bit-width
2165  // constant if the input to the intrinsic is zero.
2166  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
2167  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2168  if (IsHugeFunc)
2169  FreshBBs.insert(EndBlock);
2170 
2171  // Set up a builder to create a compare, conditional branch, and PHI.
2172  IRBuilder<> Builder(CountZeros->getContext());
2173  Builder.SetInsertPoint(StartBlock->getTerminator());
2174  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2175 
2176  // Replace the unconditional branch that was created by the first split with
2177  // a compare against zero and a conditional branch.
2178  Value *Zero = Constant::getNullValue(Ty);
2179  // Avoid introducing branch on poison. This also replaces the ctz operand.
2180  if (!isGuaranteedNotToBeUndefOrPoison(Op))
2181  Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2182  Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2183  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2184  StartBlock->getTerminator()->eraseFromParent();
2185 
2186  // Create a PHI in the end block to select either the output of the intrinsic
2187  // or the bit width of the operand.
2188  Builder.SetInsertPoint(&EndBlock->front());
2189  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2190  replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2191  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2192  PN->addIncoming(BitWidth, StartBlock);
2193  PN->addIncoming(CountZeros, CallBlock);
2194 
2195  // We are explicitly handling the zero case, so we can set the intrinsic's
2196  // undefined zero argument to 'true'. This will also prevent reprocessing the
2197  // intrinsic; we only despeculate when a zero input is defined.
2198  CountZeros->setArgOperand(1, Builder.getTrue());
2199  ModifiedDT = ModifyDT::ModifyBBDT;
2200  return true;
2201 }
2202 
2203 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2204  BasicBlock *BB = CI->getParent();
2205 
2206  // Lower inline assembly if we can.
2207  // If we found an inline asm expression, and if the target knows how to
2208  // lower it to normal LLVM code, do so now.
2209  if (CI->isInlineAsm()) {
2210  if (TLI->ExpandInlineAsm(CI)) {
2211  // Avoid invalidating the iterator.
2212  CurInstIterator = BB->begin();
2213  // Avoid processing instructions out of order, which could cause
2214  // reuse before a value is defined.
2215  SunkAddrs.clear();
2216  return true;
2217  }
2218  // Sink address computing for memory operands into the block.
2219  if (optimizeInlineAsmInst(CI))
2220  return true;
2221  }
2222 
2223  // Align the pointer arguments to this call if the target thinks it's a good
2224  // idea
2225  unsigned MinSize;
2226  Align PrefAlign;
2227  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2228  for (auto &Arg : CI->args()) {
2229  // We want to align both objects whose address is used directly and
2230  // objects whose address is used in casts and GEPs, though it only makes
2231  // sense for GEPs if the offset is a multiple of the desired alignment and
2232  // if size - offset meets the size threshold.
2233  if (!Arg->getType()->isPointerTy())
2234  continue;
2235  APInt Offset(DL->getIndexSizeInBits(
2236  cast<PointerType>(Arg->getType())->getAddressSpace()),
2237  0);
2238  Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2239  uint64_t Offset2 = Offset.getLimitedValue();
2240  if (!isAligned(PrefAlign, Offset2))
2241  continue;
2242  AllocaInst *AI;
2243  if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2244  DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2245  AI->setAlignment(PrefAlign);
2246  // Global variables can only be aligned if they are defined in this
2247  // object (i.e. they are uniquely initialized in this object), and
2248  // over-aligning global variables that have an explicit section is
2249  // forbidden.
2250  GlobalVariable *GV;
2251  if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2252  GV->getPointerAlignment(*DL) < PrefAlign &&
2253  DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2254  GV->setAlignment(PrefAlign);
2255  }
2256  }
2257  // If this is a memcpy (or similar) then we may be able to improve the
2258  // alignment.
2259  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2260  Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2261  MaybeAlign MIDestAlign = MI->getDestAlign();
2262  if (!MIDestAlign || DestAlign > *MIDestAlign)
2263  MI->setDestAlignment(DestAlign);
2264  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2265  MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2266  Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2267  if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2268  MTI->setSourceAlignment(SrcAlign);
2269  }
2270  }
2271 
2272  // If we have a cold call site, try to sink addressing computation into the
2273  // cold block. This interacts with our handling for loads and stores to
2274  // ensure that we can fold all uses of a potential addressing computation
2275  // into their uses. TODO: generalize this to work over profiling data
2276  if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
2277  !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2278  for (auto &Arg : CI->args()) {
2279  if (!Arg->getType()->isPointerTy())
2280  continue;
2281  unsigned AS = Arg->getType()->getPointerAddressSpace();
2282  return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2283  }
2284 
2285  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2286  if (II) {
2287  switch (II->getIntrinsicID()) {
2288  default:
2289  break;
2290  case Intrinsic::assume:
2291  llvm_unreachable("llvm.assume should have been removed already");
2292  case Intrinsic::experimental_widenable_condition: {
2293  // Give up on future widening opportunities so that we can fold away dead
2294  // paths and merge blocks before going into block-local instruction
2295  // selection.
2296  if (II->use_empty()) {
2297  II->eraseFromParent();
2298  return true;
2299  }
2300  Constant *RetVal = ConstantInt::getTrue(II->getContext());
2301  resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2302  replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2303  });
2304  return true;
2305  }
2306  case Intrinsic::objectsize:
2307  llvm_unreachable("llvm.objectsize.* should have been lowered already");
2308  case Intrinsic::is_constant:
2309  llvm_unreachable("llvm.is.constant.* should have been lowered already");
2310  case Intrinsic::aarch64_stlxr:
2311  case Intrinsic::aarch64_stxr: {
2312  ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2313  if (!ExtVal || !ExtVal->hasOneUse() ||
2314  ExtVal->getParent() == CI->getParent())
2315  return false;
2316  // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2317  ExtVal->moveBefore(CI);
2318  // Mark this instruction as "inserted by CGP", so that other
2319  // optimizations don't touch it.
2320  InsertedInsts.insert(ExtVal);
2321  return true;
2322  }
2323 
2324  case Intrinsic::launder_invariant_group:
2325  case Intrinsic::strip_invariant_group: {
2326  Value *ArgVal = II->getArgOperand(0);
2327  auto it = LargeOffsetGEPMap.find(II);
2328  if (it != LargeOffsetGEPMap.end()) {
2329  // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2330  // Make sure not to have to deal with iterator invalidation
2331  // after possibly adding ArgVal to LargeOffsetGEPMap.
2332  auto GEPs = std::move(it->second);
2333  LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2334  LargeOffsetGEPMap.erase(II);
2335  }
2336 
2337  replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2338  II->eraseFromParent();
2339  return true;
2340  }
2341  case Intrinsic::cttz:
2342  case Intrinsic::ctlz:
2343  // If counting zeros is expensive, try to avoid it.
2344  return despeculateCountZeros(II, TLI, DL, ModifiedDT, FreshBBs,
2345  IsHugeFunc);
2346  case Intrinsic::fshl:
2347  case Intrinsic::fshr:
2348  return optimizeFunnelShift(II);
2349  case Intrinsic::dbg_assign:
2350  case Intrinsic::dbg_value:
2351  return fixupDbgValue(II);
2352  case Intrinsic::vscale: {
2353  // If datalayout has no special restrictions on vector data layout,
2354  // replace `llvm.vscale` by an equivalent constant expression
2355  // to benefit from cheap constant propagation.
2356  Type *ScalableVectorTy =
2357  VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2358  if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
2359  auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2360  auto *One = ConstantInt::getSigned(II->getType(), 1);
2361  auto *CGep =
2362  ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2363  replaceAllUsesWith(II, ConstantExpr::getPtrToInt(CGep, II->getType()),
2364  FreshBBs, IsHugeFunc);
2365  II->eraseFromParent();
2366  return true;
2367  }
2368  break;
2369  }
2370  case Intrinsic::masked_gather:
2371  return optimizeGatherScatterInst(II, II->getArgOperand(0));
2372  case Intrinsic::masked_scatter:
2373  return optimizeGatherScatterInst(II, II->getArgOperand(1));
2374  }
2375 
2376  SmallVector<Value *, 2> PtrOps;
2377  Type *AccessTy;
2378  if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2379  while (!PtrOps.empty()) {
2380  Value *PtrVal = PtrOps.pop_back_val();
2381  unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2382  if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2383  return true;
2384  }
2385  }
2386 
2387  // From here on out we're working with named functions.
2388  if (!CI->getCalledFunction())
2389  return false;
2390 
2391  // Lower all default uses of _chk calls. This is very similar
2392  // to what InstCombineCalls does, but here we are only lowering calls
2393  // to fortified library functions (e.g. __memcpy_chk) that have the default
2394  // "don't know" as the objectsize. Anything else should be left alone.
2395  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2396  IRBuilder<> Builder(CI);
2397  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2398  replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2399  CI->eraseFromParent();
2400  return true;
2401  }
2402 
2403  return false;
2404 }
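// For the fortified-call path above, a typical simplification (a sketch) is
//   %r = call i8* @__memcpy_chk(i8* %dst, i8* %src, i64 %n, i64 -1)
// being lowered to a plain @llvm.memcpy when the object-size argument is the
// unknown "-1" sentinel, mirroring what InstCombine does for such calls.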
2405 
2406 /// Look for opportunities to duplicate return instructions to the predecessor
2407 /// to enable tail call optimizations. The case it is currently looking for is:
2408 /// @code
2409 /// bb0:
2410 /// %tmp0 = tail call i32 @f0()
2411 /// br label %return
2412 /// bb1:
2413 /// %tmp1 = tail call i32 @f1()
2414 /// br label %return
2415 /// bb2:
2416 /// %tmp2 = tail call i32 @f2()
2417 /// br label %return
2418 /// return:
2419 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2420 /// ret i32 %retval
2421 /// @endcode
2422 ///
2423 /// =>
2424 ///
2425 /// @code
2426 /// bb0:
2427 /// %tmp0 = tail call i32 @f0()
2428 /// ret i32 %tmp0
2429 /// bb1:
2430 /// %tmp1 = tail call i32 @f1()
2431 /// ret i32 %tmp1
2432 /// bb2:
2433 /// %tmp2 = tail call i32 @f2()
2434 /// ret i32 %tmp2
2435 /// @endcode
2436 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2437  ModifyDT &ModifiedDT) {
2438  if (!BB->getTerminator())
2439  return false;
2440 
2441  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2442  if (!RetI)
2443  return false;
2444 
2445  PHINode *PN = nullptr;
2446  ExtractValueInst *EVI = nullptr;
2447  BitCastInst *BCI = nullptr;
2448  Value *V = RetI->getReturnValue();
2449  if (V) {
2450  BCI = dyn_cast<BitCastInst>(V);
2451  if (BCI)
2452  V = BCI->getOperand(0);
2453 
2454  EVI = dyn_cast<ExtractValueInst>(V);
2455  if (EVI) {
2456  V = EVI->getOperand(0);
2457  if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2458  return false;
2459  }
2460 
2461  PN = dyn_cast<PHINode>(V);
2462  if (!PN)
2463  return false;
2464  }
2465 
2466  if (PN && PN->getParent() != BB)
2467  return false;
2468 
2469  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2470  const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2471  if (BC && BC->hasOneUse())
2472  Inst = BC->user_back();
2473 
2474  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2475  return II->getIntrinsicID() == Intrinsic::lifetime_end;
2476  return false;
2477  };
2478 
2479  // Make sure there are no instructions between the first instruction
2480  // and return.
2481  const Instruction *BI = BB->getFirstNonPHI();
2482  // Skip over debug and the bitcast.
2483  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2484  isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2485  BI = BI->getNextNode();
2486  if (BI != RetI)
2487  return false;
2488 
2489  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2490  /// call.
2491  const Function *F = BB->getParent();
2492  SmallVector<BasicBlock *, 4> TailCallBBs;
2493  if (PN) {
2494  for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2495  // Look through bitcasts.
2496  Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2497  CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2498  BasicBlock *PredBB = PN->getIncomingBlock(I);
2499  // Make sure the phi value is indeed produced by the tail call.
2500  if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2501  TLI->mayBeEmittedAsTailCall(CI) &&
2502  attributesPermitTailCall(F, CI, RetI, *TLI))
2503  TailCallBBs.push_back(PredBB);
2504  }
2505  } else {
2506  SmallPtrSet<BasicBlock *, 4> VisitedBBs;
2507  for (BasicBlock *Pred : predecessors(BB)) {
2508  if (!VisitedBBs.insert(Pred).second)
2509  continue;
2510  if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2511  CallInst *CI = dyn_cast<CallInst>(I);
2512  if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2513  attributesPermitTailCall(F, CI, RetI, *TLI))
2514  TailCallBBs.push_back(Pred);
2515  }
2516  }
2517  }
2518 
2519  bool Changed = false;
2520  for (auto const &TailCallBB : TailCallBBs) {
2521  // Make sure the call instruction is followed by an unconditional branch to
2522  // the return block.
2523  BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2524  if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2525  continue;
2526 
2527  // Duplicate the return into TailCallBB.
2528  (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2529  assert(!VerifyBFIUpdates ||
2530  BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2531  BFI->setBlockFreq(
2532  BB,
2533  (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2534  ModifiedDT = ModifyDT::ModifyBBDT;
2535  Changed = true;
2536  ++NumRetsDup;
2537  }
2538 
2539  // If we eliminated all predecessors of the block, delete the block now.
2540  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2541  BB->eraseFromParent();
2542 
2543  return Changed;
2544 }
2545 
2546 //===----------------------------------------------------------------------===//
2547 // Memory Optimization
2548 //===----------------------------------------------------------------------===//
2549 
2550 namespace {
2551 
2552 /// This is an extended version of TargetLowering::AddrMode
2553 /// which holds actual Value*'s for register values.
2554 struct ExtAddrMode : public TargetLowering::AddrMode {
2555  Value *BaseReg = nullptr;
2556  Value *ScaledReg = nullptr;
2557  Value *OriginalValue = nullptr;
2558  bool InBounds = true;
2559 
2560  enum FieldName {
2561  NoField = 0x00,
2562  BaseRegField = 0x01,
2563  BaseGVField = 0x02,
2564  BaseOffsField = 0x04,
2565  ScaledRegField = 0x08,
2566  ScaleField = 0x10,
2567  MultipleFields = 0xff
2568  };
2569 
2570  ExtAddrMode() = default;
2571 
2572  void print(raw_ostream &OS) const;
2573  void dump() const;
2574 
2575  FieldName compare(const ExtAddrMode &other) {
2576  // First check that the types are the same on each field, as differing types
2577  // are something we can't cope with later on.
2578  if (BaseReg && other.BaseReg &&
2579  BaseReg->getType() != other.BaseReg->getType())
2580  return MultipleFields;
2581  if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
2582  return MultipleFields;
2583  if (ScaledReg && other.ScaledReg &&
2584  ScaledReg->getType() != other.ScaledReg->getType())
2585  return MultipleFields;
2586 
2587  // Conservatively reject 'inbounds' mismatches.
2588  if (InBounds != other.InBounds)
2589  return MultipleFields;
2590 
2591  // Check each field to see if it differs.
2592  unsigned Result = NoField;
2593  if (BaseReg != other.BaseReg)
2594  Result |= BaseRegField;
2595  if (BaseGV != other.BaseGV)
2596  Result |= BaseGVField;
2597  if (BaseOffs != other.BaseOffs)
2598  Result |= BaseOffsField;
2599  if (ScaledReg != other.ScaledReg)
2600  Result |= ScaledRegField;
2601  // Don't count 0 as being a different scale, because that actually means
2602  // unscaled (which will already be counted by having no ScaledReg).
2603  if (Scale && other.Scale && Scale != other.Scale)
2604  Result |= ScaleField;
2605 
2606  if (countPopulation(Result) > 1)
2607  return MultipleFields;
2608  else
2609  return static_cast<FieldName>(Result);
2610  }
2611 
2612  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2613  // with no offset.
2614  bool isTrivial() {
2615  // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2616  // trivial if at most one of these terms is nonzero, except that BaseGV and
2617  // BaseReg both being zero actually means a null pointer value, which we
2618  // consider to be 'non-zero' here.
2619  return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2620  }
2621 
2622  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2623  switch (Field) {
2624  default:
2625  return nullptr;
2626  case BaseRegField:
2627  return BaseReg;
2628  case BaseGVField:
2629  return BaseGV;
2630  case ScaledRegField:
2631  return ScaledReg;
2632  case BaseOffsField:
2633  return ConstantInt::get(IntPtrTy, BaseOffs);
2634  }
2635  }
2636 
2637  void SetCombinedField(FieldName Field, Value *V,
2638  const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2639  switch (Field) {
2640  default:
2641  llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2642  break;
2643  case ExtAddrMode::BaseRegField:
2644  BaseReg = V;
2645  break;
2646  case ExtAddrMode::BaseGVField:
2647  // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2648  // in the BaseReg field.
2649  assert(BaseReg == nullptr);
2650  BaseReg = V;
2651  BaseGV = nullptr;
2652  break;
2653  case ExtAddrMode::ScaledRegField:
2654  ScaledReg = V;
2655  // If we have a mix of scaled and unscaled addrmodes then we want scale
2656  // to be the scale and not zero.
2657  if (!Scale)
2658  for (const ExtAddrMode &AM : AddrModes)
2659  if (AM.Scale) {
2660  Scale = AM.Scale;
2661  break;
2662  }
2663  break;
2664  case ExtAddrMode::BaseOffsField:
2665  // The offset is no longer a constant, so it goes in ScaledReg with a
2666  // scale of 1.
2667  assert(ScaledReg == nullptr);
2668  ScaledReg = V;
2669  Scale = 1;
2670  BaseOffs = 0;
2671  break;
2672  }
2673  }
2674 };
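// As an illustration (a sketch): for a load whose address is computed as
//   %p = getelementptr inbounds i8, i8* %base, i64 %idx
// plus a further constant offset of 16, the address-mode matching code later
// in this file may record BaseReg = %base, ScaledReg = %idx, Scale = 1 and
// BaseOffs = 16, i.e. the address is BaseGV + BaseReg + BaseOffs + Scale * ScaledReg.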
2675 
2676 #ifndef NDEBUG
2677 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2678  AM.print(OS);
2679  return OS;
2680 }
2681 #endif
2682 
2683 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2684 void ExtAddrMode::print(raw_ostream &OS) const {
2685  bool NeedPlus = false;
2686  OS << "[";
2687  if (InBounds)
2688  OS << "inbounds ";
2689  if (BaseGV) {
2690  OS << (NeedPlus ? " + " : "") << "GV:";
2691  BaseGV->printAsOperand(OS, /*PrintType=*/false);
2692  NeedPlus = true;
2693  }
2694 
2695  if (BaseOffs) {
2696  OS << (NeedPlus ? " + " : "") << BaseOffs;
2697  NeedPlus = true;
2698  }
2699 
2700  if (BaseReg) {
2701  OS << (NeedPlus ? " + " : "") << "Base:";
2702  BaseReg->printAsOperand(OS, /*PrintType=*/false);
2703  NeedPlus = true;
2704  }
2705  if (Scale) {
2706  OS << (NeedPlus ? " + " : "") << Scale << "*";
2707  ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2708  }
2709 
2710  OS << ']';
2711 }
2712 
2713 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2714  print(dbgs());
2715  dbgs() << '\n';
2716 }
2717 #endif
2718 
2719 } // end anonymous namespace
2720 
2721 namespace {
2722 
2723 /// This class provides transaction-based operations on the IR.
2724 /// Every change made through this class is recorded in the internal state and
2725 /// can be undone (rollback) until commit is called.
2726 /// CGP does not check if instructions could be speculatively executed when
2727 /// moved. Preserving the original location would pessimize the debugging
2728 /// experience, as well as negatively impact the quality of sample PGO.
2729 class TypePromotionTransaction {
2730  /// This represents the common interface of the individual transaction.
2731  /// Each class implements the logic for doing one specific modification on
2732  /// the IR via the TypePromotionTransaction.
2733  class TypePromotionAction {
2734  protected:
2735  /// The Instruction modified.
2736  Instruction *Inst;
2737 
2738  public:
2739  /// Constructor of the action.
2740  /// The constructor performs the related action on the IR.
2741  TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2742 
2743  virtual ~TypePromotionAction() = default;
2744 
2745  /// Undo the modification done by this action.
2746  /// When this method is called, the IR must be in the same state as it was
2747  /// before this action was applied.
2748  /// \pre Undoing the action works if and only if the IR is in the exact same
2749  /// state as it was directly after this action was applied.
2750  virtual void undo() = 0;
2751 
2752  /// Advocate every change made by this action.
2753  /// When the results on the IR of the action are to be kept, it is important
2754  /// to call this function, otherwise hidden information may be kept forever.
2755  virtual void commit() {
2756  // Nothing to be done, this action is not doing anything.
2757  }
2758  };
2759 
2760  /// Utility to remember the position of an instruction.
2761  class InsertionHandler {
2762  /// Position of an instruction.
2763  /// Either an instruction:
2764  /// - Is the first in a basic block: BB is used.
2765  /// - Has a previous instruction: PrevInst is used.
2766  union {
2767  Instruction *PrevInst;
2768  BasicBlock *BB;
2769  } Point;
2770 
2771  /// Remember whether or not the instruction had a previous instruction.
2772  bool HasPrevInstruction;
2773 
2774  public:
2775  /// Record the position of \p Inst.
2776  InsertionHandler(Instruction *Inst) {
2777  BasicBlock::iterator It = Inst->getIterator();
2778  HasPrevInstruction = (It != (Inst->getParent()->begin()));
2779  if (HasPrevInstruction)
2780  Point.PrevInst = &*--It;
2781  else
2782  Point.BB = Inst->getParent();
2783  }
2784 
2785  /// Insert \p Inst at the recorded position.
2786  void insert(Instruction *Inst) {
2787  if (HasPrevInstruction) {
2788  if (Inst->getParent())
2789  Inst->removeFromParent();
2790  Inst->insertAfter(Point.PrevInst);
2791  } else {
2792  Instruction *Position = &*Point.BB->getFirstInsertionPt();
2793  if (Inst->getParent())
2794  Inst->moveBefore(Position);
2795  else
2796  Inst->insertBefore(Position);
2797  }
2798  }
2799  };
2800 
2801  /// Move an instruction before another.
2802  class InstructionMoveBefore : public TypePromotionAction {
2803  /// Original position of the instruction.
2804  InsertionHandler Position;
2805 
2806  public:
2807  /// Move \p Inst before \p Before.
2808  InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2809  : TypePromotionAction(Inst), Position(Inst) {
2810  LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2811  << "\n");
2812  Inst->moveBefore(Before);
2813  }
2814 
2815  /// Move the instruction back to its original position.
2816  void undo() override {
2817  LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2818  Position.insert(Inst);
2819  }
2820  };
2821 
2822  /// Set the operand of an instruction with a new value.
2823  class OperandSetter : public TypePromotionAction {
2824  /// Original operand of the instruction.
2825  Value *Origin;
2826 
2827  /// Index of the modified instruction.
2828  unsigned Idx;
2829 
2830  public:
2831  /// Set \p Idx operand of \p Inst with \p NewVal.
2832  OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2833  : TypePromotionAction(Inst), Idx(Idx) {
2834  LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2835  << "for:" << *Inst << "\n"
2836  << "with:" << *NewVal << "\n");
2837  Origin = Inst->getOperand(Idx);
2838  Inst->setOperand(Idx, NewVal);
2839  }
2840 
2841  /// Restore the original value of the instruction.
2842  void undo() override {
2843  LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2844  << "for: " << *Inst << "\n"
2845  << "with: " << *Origin << "\n");
2846  Inst->setOperand(Idx, Origin);
2847  }
2848  };
2849 
2850  /// Hide the operands of an instruction.
2851  /// Act as if this instruction were not using any of its operands.
2852  class OperandsHider : public TypePromotionAction {
2853  /// The list of original operands.
2854  SmallVector<Value *, 4> OriginalValues;
2855 
2856  public:
2857  /// Remove \p Inst from the uses of the operands of \p Inst.
2858  OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2859  LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2860  unsigned NumOpnds = Inst->getNumOperands();
2861  OriginalValues.reserve(NumOpnds);
2862  for (unsigned It = 0; It < NumOpnds; ++It) {
2863  // Save the current operand.
2864  Value *Val = Inst->getOperand(It);
2865  OriginalValues.push_back(Val);
2866  // Set a dummy one.
2867  // We could use OperandSetter here, but that would imply an overhead
2868  // that we are not willing to pay.
2869  Inst->setOperand(It, UndefValue::get(Val->getType()));
2870  }
2871  }
2872 
2873  /// Restore the original list of uses.
2874  void undo() override {
2875  LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2876  for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2877  Inst->setOperand(It, OriginalValues[It]);
2878  }
2879  };
2880 
2881  /// Build a truncate instruction.
2882  class TruncBuilder : public TypePromotionAction {
2883  Value *Val;
2884 
2885  public:
2886  /// Build a truncate instruction of \p Opnd producing a \p Ty
2887  /// result.
2888  /// trunc Opnd to Ty.
2889  TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2890  IRBuilder<> Builder(Opnd);
2891  Builder.SetCurrentDebugLocation(DebugLoc());
2892  Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2893  LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2894  }
2895 
2896  /// Get the built value.
2897  Value *getBuiltValue() { return Val; }
2898 
2899  /// Remove the built instruction.
2900  void undo() override {
2901  LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2902  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2903  IVal->eraseFromParent();
2904  }
2905  };
2906 
2907  /// Build a sign extension instruction.
2908  class SExtBuilder : public TypePromotionAction {
2909  Value *Val;
2910 
2911  public:
2912  /// Build a sign extension instruction of \p Opnd producing a \p Ty
2913  /// result.
2914  /// sext Opnd to Ty.
2915  SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2916  : TypePromotionAction(InsertPt) {
2917  IRBuilder<> Builder(InsertPt);
2918  Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2919  LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2920  }
2921 
2922  /// Get the built value.
2923  Value *getBuiltValue() { return Val; }
2924 
2925  /// Remove the built instruction.
2926  void undo() override {
2927  LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2928  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2929  IVal->eraseFromParent();
2930  }
2931  };
2932 
2933  /// Build a zero extension instruction.
2934  class ZExtBuilder : public TypePromotionAction {
2935  Value *Val;
2936 
2937  public:
2938  /// Build a zero extension instruction of \p Opnd producing a \p Ty
2939  /// result.
2940  /// zext Opnd to Ty.
2941  ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2942  : TypePromotionAction(InsertPt) {
2943  IRBuilder<> Builder(InsertPt);
2944  Builder.SetCurrentDebugLocation(DebugLoc());
2945  Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2946  LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2947  }
2948 
2949  /// Get the built value.
2950  Value *getBuiltValue() { return Val; }
2951 
2952  /// Remove the built instruction.
2953  void undo() override {
2954  LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2955  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2956  IVal->eraseFromParent();
2957  }
2958  };
2959 
2960  /// Mutate an instruction to another type.
2961  class TypeMutator : public TypePromotionAction {
2962  /// Record the original type.
2963  Type *OrigTy;
2964 
2965  public:
2966  /// Mutate the type of \p Inst into \p NewTy.
2967  TypeMutator(Instruction *Inst, Type *NewTy)
2968  : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2969  LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2970  << "\n");
2971  Inst->mutateType(NewTy);
2972  }
2973 
2974  /// Mutate the instruction back to its original type.
2975  void undo() override {
2976  LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2977  << "\n");
2978  Inst->mutateType(OrigTy);
2979  }
2980  };
2981 
2982  /// Replace the uses of an instruction by another instruction.
2983  class UsesReplacer : public TypePromotionAction {
2984  /// Helper structure to keep track of the replaced uses.
2985  struct InstructionAndIdx {
2986  /// The instruction using the instruction.
2987  Instruction *Inst;
2988 
2989  /// The operand index at which the original instruction is used by Inst.
2990  unsigned Idx;
2991 
2992  InstructionAndIdx(Instruction *Inst, unsigned Idx)
2993  : Inst(Inst), Idx(Idx) {}
2994  };
2995 
2996  /// Keep track of the original uses (pair Instruction, Index).
2997  SmallVector<InstructionAndIdx, 4> OriginalUses;
2998  /// Keep track of the debug users.
2999  SmallVector<DbgValueInst *, 1> DbgValues;
3000 
3001  /// Keep track of the new value so that we can undo it by replacing
3002  /// instances of the new value with the original value.
3003  Value *New;
3004 
3005  using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3006 
3007  public:
3008  /// Replace all the use of \p Inst by \p New.
3009  UsesReplacer(Instruction *Inst, Value *New)
3010  : TypePromotionAction(Inst), New(New) {
3011  LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3012  << "\n");
3013  // Record the original uses.
3014  for (Use &U : Inst->uses()) {
3015  Instruction *UserI = cast<Instruction>(U.getUser());
3016  OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3017  }
3018  // Record the debug uses separately. They are not in the instruction's
3019  // use list, but they are replaced by RAUW.
3020  findDbgValues(DbgValues, Inst);
3021 
3022  // Now, we can replace the uses.
3023  Inst->replaceAllUsesWith(New);
3024  }
3025 
3026  /// Reassign the original uses of Inst to Inst.
3027  void undo() override {
3028  LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3029  for (InstructionAndIdx &Use : OriginalUses)
3030  Use.Inst->setOperand(Use.Idx, Inst);
3031  // RAUW has replaced all original uses with references to the new value,
3032  // including the debug uses. Since we are undoing the replacements,
3033  // the original debug uses must also be reinstated to maintain the
3034  // correctness and utility of debug value instructions.
3035  for (auto *DVI : DbgValues)
3036  DVI->replaceVariableLocationOp(New, Inst);
3037  }
3038  };
3039 
3040  /// Remove an instruction from the IR.
3041  class InstructionRemover : public TypePromotionAction {
3042  /// Original position of the instruction.
3043  InsertionHandler Inserter;
3044 
3045  /// Helper structure to hide all the links to the instruction. In other
3046  /// words, this helps to act as if the instruction was removed.
3047  OperandsHider Hider;
3048 
3049  /// Keep track of the uses replaced, if any.
3050  UsesReplacer *Replacer = nullptr;
3051 
3052  /// Keep track of instructions removed.
3053  SetOfInstrs &RemovedInsts;
3054 
3055  public:
3056  /// Remove all references to \p Inst and optionally replace all its
3057  /// uses with New.
3058  /// \p RemovedInsts Keep track of the instructions removed by this Action.
3059  /// \pre If !Inst->use_empty(), then New != nullptr
3060  InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3061  Value *New = nullptr)
3062  : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3063  RemovedInsts(RemovedInsts) {
3064  if (New)
3065  Replacer = new UsesReplacer(Inst, New);
3066  LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3067  RemovedInsts.insert(Inst);
3068  /// The instructions removed here will be freed after completing
3069  /// optimizeBlock() for all blocks as we need to keep track of the
3070  /// removed instructions during promotion.
3071  Inst->removeFromParent();
3072  }
3073 
3074  ~InstructionRemover() override { delete Replacer; }
3075 
3076  /// Resurrect the instruction and reassign it to the proper uses if a
3077  /// new value was provided when building this action.
3078  void undo() override {
3079  LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3080  Inserter.insert(Inst);
3081  if (Replacer)
3082  Replacer->undo();
3083  Hider.undo();
3084  RemovedInsts.erase(Inst);
3085  }
3086  };
3087 
3088 public:
3089  /// Restoration point.
3090  /// The restoration point is a pointer to an action instead of an iterator
3091  /// because the iterator may be invalidated but not the pointer.
3092  using ConstRestorationPt = const TypePromotionAction *;
3093 
3094  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3095  : RemovedInsts(RemovedInsts) {}
3096 
3097  /// Commit every change made in this transaction. Return true if any change
3098  /// happened.
3099  bool commit();
3100 
3101  /// Undo all the changes made after the given point.
3102  void rollback(ConstRestorationPt Point);
3103 
3104  /// Get the current restoration point.
3105  ConstRestorationPt getRestorationPoint() const;
3106 
3107  /// \name API for IR modification with state keeping to support rollback.
3108  /// @{
3109  /// Same as Instruction::setOperand.
3110  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3111 
3112  /// Same as Instruction::eraseFromParent.
3113  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3114 
3115  /// Same as Value::replaceAllUsesWith.
3116  void replaceAllUsesWith(Instruction *Inst, Value *New);
3117 
3118  /// Same as Value::mutateType.
3119  void mutateType(Instruction *Inst, Type *NewTy);
3120 
3121  /// Same as IRBuilder::createTrunc.
3122  Value *createTrunc(Instruction *Opnd, Type *Ty);
3123 
3124  /// Same as IRBuilder::createSExt.
3125  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3126 
3127  /// Same as IRBuilder::createZExt.
3128  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3129 
3130  /// Same as Instruction::moveBefore.
3131  void moveBefore(Instruction *Inst, Instruction *Before);
3132  /// @}
3133 
3134 private:
3135  /// The ordered list of actions made so far.
3136  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3137 
3138  using CommitPt =
3139  SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3140 
3141  SetOfInstrs &RemovedInsts;
3142 };
3143 
3144 } // end anonymous namespace
3145 
3146 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3147  Value *NewVal) {
3148  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3149  Inst, Idx, NewVal));
3150 }
3151 
3152 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3153  Value *NewVal) {
3154  Actions.push_back(
3155  std::make_unique<TypePromotionTransaction::InstructionRemover>(
3156  Inst, RemovedInsts, NewVal));
3157 }
3158 
3159 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3160  Value *New) {
3161  Actions.push_back(
3162  std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3163 }
3164 
3165 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3166  Actions.push_back(
3167  std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3168 }
3169 
3170 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3171  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3172  Value *Val = Ptr->getBuiltValue();
3173  Actions.push_back(std::move(Ptr));
3174  return Val;
3175 }
3176 
3177 Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3178  Type *Ty) {
3179  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3180  Value *Val = Ptr->getBuiltValue();
3181  Actions.push_back(std::move(Ptr));
3182  return Val;
3183 }
3184 
3185 Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3186  Type *Ty) {
3187  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3188  Value *Val = Ptr->getBuiltValue();
3189  Actions.push_back(std::move(Ptr));
3190  return Val;
3191 }
3192 
3193 void TypePromotionTransaction::moveBefore(Instruction *Inst,
3194  Instruction *Before) {
3195  Actions.push_back(
3196  std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
3197  Inst, Before));
3198 }
3199 
3200 TypePromotionTransaction::ConstRestorationPt
3201 TypePromotionTransaction::getRestorationPoint() const {
3202  return !Actions.empty() ? Actions.back().get() : nullptr;
3203 }
3204 
3205 bool TypePromotionTransaction::commit() {
3206  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3207  Action->commit();
3208  bool Modified = !Actions.empty();
3209  Actions.clear();
3210  return Modified;
3211 }
3212 
3213 void TypePromotionTransaction::rollback(
3214  TypePromotionTransaction::ConstRestorationPt Point) {
3215  while (!Actions.empty() && Point != Actions.back().get()) {
3216  std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3217  Curr->undo();
3218  }
3219 }
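// Illustrative sketch (not part of the original source): a typical client of
// TypePromotionTransaction records a restoration point, applies speculative
// rewrites through the transaction, and then either rolls back or commits.
// The names Inst, WideVal, Profitable and Changed below are hypothetical.
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   TPT.setOperand(Inst, 0, WideVal);     // speculative, undoable change
//   if (!Profitable)
//     TPT.rollback(LastKnownGood);        // undo every action after the point
//   else
//     Changed |= TPT.commit();            // make all recorded actions final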
3220 
3221 namespace {
3222 
3223 /// A helper class for matching addressing modes.
3224 ///
3225 /// This encapsulates the logic for matching the target-legal addressing modes.
3226 class AddressingModeMatcher {
3227  SmallVectorImpl<Instruction *> &AddrModeInsts;
3228  const TargetLowering &TLI;
3229  const TargetRegisterInfo &TRI;
3230  const DataLayout &DL;
3231  const LoopInfo &LI;
3232  const std::function<const DominatorTree &()> getDTFn;
3233 
3234  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3235  /// the memory instruction that we're computing this address for.
3236  Type *AccessTy;
3237  unsigned AddrSpace;
3238  Instruction *MemoryInst;
3239 
3240  /// This is the addressing mode that we're building up. This is
3241  /// part of the return value of this addressing mode matching stuff.
3242  ExtAddrMode &AddrMode;
3243 
3244  /// The instructions inserted by other CodeGenPrepare optimizations.
3245  const SetOfInstrs &InsertedInsts;
3246 
3247  /// A map from the instructions to their type before promotion.
3248  InstrToOrigTy &PromotedInsts;
3249 
3250  /// The ongoing transaction where every action should be registered.
3251  TypePromotionTransaction &TPT;
3252 
3253  // A GEP whose offset is too large to be folded into the addressing mode.
3254  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3255 
3256  /// This is set to true when we should not do profitability checks.
3257  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3258  bool IgnoreProfitability;
3259 
3260  /// True if we are optimizing for size.
3261  bool OptSize;
3262 
3263  ProfileSummaryInfo *PSI;
3264  BlockFrequencyInfo *BFI;
3265 
3266  AddressingModeMatcher(
3267  SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3268  const TargetRegisterInfo &TRI, const LoopInfo &LI,
3269  const std::function<const DominatorTree &()> getDTFn, Type *AT,
3270  unsigned AS, Instruction *MI, ExtAddrMode &AM,
3271  const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3272  TypePromotionTransaction &TPT,
3273  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3274  bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3275  : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3276  DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn),
3277  AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3278  InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3279  LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3280  IgnoreProfitability = false;
3281  }
3282 
3283 public:
3284  /// Find the maximal addressing mode that a load/store of V can fold,
3285  /// given an access type of AccessTy. This returns a list of involved
3286  /// instructions in AddrModeInsts.
3287  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3288  /// optimizations.
3289  /// \p PromotedInsts maps the instructions to their type before promotion.
3290  /// \p TPT The ongoing transaction where every action should be registered.
3291  static ExtAddrMode
3292  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3293  SmallVectorImpl<Instruction *> &AddrModeInsts,
3294  const TargetLowering &TLI, const LoopInfo &LI,
3295  const std::function<const DominatorTree &()> getDTFn,
3296  const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3297  InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3298  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3299  bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3300  ExtAddrMode Result;
3301 
3302  bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3303  AccessTy, AS, MemoryInst, Result,
3304  InsertedInsts, PromotedInsts, TPT,
3305  LargeOffsetGEP, OptSize, PSI, BFI)
3306  .matchAddr(V, 0);
3307  (void)Success;
3308  assert(Success && "Couldn't select *anything*?");
3309  return Result;
3310  }
3311 
3312 private:
3313  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3314  bool matchAddr(Value *Addr, unsigned Depth);
3315  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3316  bool *MovedAway = nullptr);
3317  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3318  ExtAddrMode &AMBefore,
3319  ExtAddrMode &AMAfter);
3320  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3321  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3322  Value *PromotedOperand) const;
3323 };
3324 
3325 class PhiNodeSet;
3326 
3327 /// An iterator for PhiNodeSet.
3328 class PhiNodeSetIterator {
3329  PhiNodeSet *const Set;
3330  size_t CurrentIndex = 0;
3331 
3332 public:
3333  /// The constructor. Start should point to either a valid element, or be equal
3334  /// to the size of the underlying SmallVector of the PhiNodeSet.
3335  PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3336  PHINode *operator*() const;
3337  PhiNodeSetIterator &operator++();
3338  bool operator==(const PhiNodeSetIterator &RHS) const;
3339  bool operator!=(const PhiNodeSetIterator &RHS) const;
3340 };
3341 
3342 /// Keeps a set of PHINodes.
3343 ///
3344 /// This is a minimal set implementation for a specific use case:
3345 /// It is very fast when there are very few elements, but also provides good
3346 /// performance when there are many. It is similar to SmallPtrSet, but also
3347 /// provides iteration by insertion order, which is deterministic and stable
3348 /// across runs. It is also similar to SmallSetVector, but supports removing
3349 /// elements in O(1) time. This is achieved by not actually removing the element
3350 /// from the underlying vector, so it comes at the cost of using more memory, but
3351 /// that is fine, since PhiNodeSets are used as short-lived objects.
3352 class PhiNodeSet {
3353  friend class PhiNodeSetIterator;
3354 
3355  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3356  using iterator = PhiNodeSetIterator;
3357 
3358  /// Keeps the elements in the order of their insertion in the underlying
3359  /// vector. To achieve constant time removal, it never deletes any element.
3360  SmallVector<PHINode *, 32> NodeList;
3361 
3362  /// Keeps the elements in the underlying set implementation. This (and not the
3363  /// NodeList defined above) is the source of truth on whether an element
3364  /// is actually in the collection.
3365  MapType NodeMap;
3366 
3367  /// Points to the first valid (not deleted) element when the set is not empty
3368  /// and the value is not zero. Equals the size of the underlying vector
3369  /// when the set is empty. When the value is 0, as in the beginning, the
3370  /// first element may or may not be valid.
3371  size_t FirstValidElement = 0;
3372 
3373 public:
3374  /// Inserts a new element to the collection.
3375  /// \returns true if the element is actually added, i.e. was not in the
3376  /// collection before the operation.
3377  bool insert(PHINode *Ptr) {
3378  if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3379  NodeList.push_back(Ptr);
3380  return true;
3381  }
3382  return false;
3383  }
3384 
3385  /// Removes the element from the collection.
3386  /// \returns whether the element is actually removed, i.e. was in the
3387  /// collection before the operation.
3388  bool erase(PHINode *Ptr) {
3389  if (NodeMap.erase(Ptr)) {
3390  SkipRemovedElements(FirstValidElement);
3391  return true;
3392  }
3393  return false;
3394  }
3395 
3396  /// Removes all elements and clears the collection.
3397  void clear() {
3398  NodeMap.clear();
3399  NodeList.clear();
3400  FirstValidElement = 0;
3401  }
3402 
3403  /// \returns an iterator that will iterate the elements in the order of
3404  /// insertion.
3405  iterator begin() {
3406  if (FirstValidElement == 0)
3407  SkipRemovedElements(FirstValidElement);
3408  return PhiNodeSetIterator(this, FirstValidElement);
3409  }
3410 
3411  /// \returns an iterator that points to the end of the collection.
3412  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3413 
3414  /// Returns the number of elements in the collection.
3415  size_t size() const { return NodeMap.size(); }
3416 
3417  /// \returns 1 if the given element is in the collection, and 0 otherwise.
3418  size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
3419 
3420 private:
3421  /// Updates the CurrentIndex so that it will point to a valid element.
3422  ///
3423  /// If the element of NodeList at CurrentIndex is valid, it does not
3424  /// change it. If there are no more valid elements, it updates CurrentIndex
3425  /// to point to the end of the NodeList.
3426  void SkipRemovedElements(size_t &CurrentIndex) {
3427  while (CurrentIndex < NodeList.size()) {
3428  auto it = NodeMap.find(NodeList[CurrentIndex]);
3429  // If the element has been deleted and added again later, NodeMap will
3430  // point to a different index, so CurrentIndex will still be invalid.
3431  if (it != NodeMap.end() && it->second == CurrentIndex)
3432  break;
3433  ++CurrentIndex;
3434  }
3435  }
3436 };
3437 
3438 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3439  : Set(Set), CurrentIndex(Start) {}
3440 
3441 PHINode *PhiNodeSetIterator::operator*() const {
3442  assert(CurrentIndex < Set->NodeList.size() &&
3443  "PhiNodeSet access out of range");
3444  return Set->NodeList[CurrentIndex];
3445 }
3446 
3447 PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
3448  assert(CurrentIndex < Set->NodeList.size() &&
3449  "PhiNodeSet access out of range");
3450  ++CurrentIndex;
3451  Set->SkipRemovedElements(CurrentIndex);
3452  return *this;
3453 }
3454 
3455 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3456  return CurrentIndex == RHS.CurrentIndex;
3457 }
3458 
3459 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3460  return !((*this) == RHS);
3461 }
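// Illustrative sketch (not part of the original source): PhiNodeSet only
// drops erased elements from NodeMap, never from NodeList, so erase() is O(1)
// and iteration skips the stale slots via SkipRemovedElements. P1 and P2
// below stand for arbitrary PHINode pointers.
//
//   PhiNodeSet Set;
//   Set.insert(P1);          // NodeList = [P1]
//   Set.insert(P2);          // NodeList = [P1, P2]
//   Set.erase(P1);           // NodeList still [P1, P2], NodeMap = {P2}
//   for (PHINode *P : Set)   // visits only P2, in insertion order
//     P->dump();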
3462 
3463 /// Keeps track of the simplification of Phi nodes.
3464 /// Accepts the set of all phi nodes and erases a phi node from this set
3465 /// if it is simplified.
3466 class SimplificationTracker {
3467  DenseMap<Value *, Value *> Storage;
3468  const SimplifyQuery &SQ;
3469  // Tracks newly created Phi nodes. The elements are iterated by insertion
3470  // order.
3471  PhiNodeSet AllPhiNodes;
3472  // Tracks newly created Select nodes.
3473  SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3474 
3475 public:
3476  SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
3477 
3478  Value *Get(Value *V) {
3479  do {
3480  auto SV = Storage.find(V);
3481  if (SV == Storage.end())
3482  return V;
3483  V = SV->second;
3484  } while (true);
3485  }
3486 
3487  Value *Simplify(Value *Val) {
3488  SmallVector<Value *, 32> WorkList;
3489  SmallPtrSet<Value *, 32> Visited;
3490  WorkList.push_back(Val);
3491  while (!WorkList.empty()) {
3492  auto *P = WorkList.pop_back_val();
3493  if (!Visited.insert(P).second)
3494  continue;
3495  if (auto *PI = dyn_cast<Instruction>(P))
3496  if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3497  for (auto *U : PI->users())
3498  WorkList.push_back(cast<Value>(U));
3499  Put(PI, V);
3500  PI->replaceAllUsesWith(V);
3501  if (auto *PHI = dyn_cast<PHINode>(PI))
3502  AllPhiNodes.erase(PHI);
3503  if (auto *Select = dyn_cast<SelectInst>(PI))
3504  AllSelectNodes.erase(Select);
3505  PI->eraseFromParent();
3506  }
3507  }
3508  return Get(Val);
3509  }
3510 
3511  void Put(Value *From, Value *To) { Storage.insert({From, To}); }
3512 
3513  void ReplacePhi(PHINode *From, PHINode *To) {
3514  Value *OldReplacement = Get(From);
3515  while (OldReplacement != From) {
3516  From = To;
3517  To = dyn_cast<PHINode>(OldReplacement);
3518  OldReplacement = Get(From);
3519  }
3520  assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3521  Put(From, To);
3522  From->replaceAllUsesWith(To);
3523  AllPhiNodes.erase(From);
3524  From->eraseFromParent();
3525  }
3526 
3527  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
3528 
3529  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3530 
3531  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3532 
3533  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3534 
3535  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3536 
3537  void destroyNewNodes(Type *CommonType) {
3538  // For safe erasing, replace the uses with dummy value first.
3539  auto *Dummy = PoisonValue::get(CommonType);
3540  for (auto *I : AllPhiNodes) {
3541  I->replaceAllUsesWith(Dummy);
3542  I->eraseFromParent();
3543  }
3544  AllPhiNodes.clear();
3545  for (auto *I : AllSelectNodes) {
3546  I->replaceAllUsesWith(Dummy);
3547  I->eraseFromParent();
3548  }
3549  AllSelectNodes.clear();
3550  }
3551 };
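// Illustrative sketch (not part of the original source): Get() follows chains
// of replacements recorded with Put(), so a query always resolves to the
// current live value even after several simplification rounds. A, B and C are
// hypothetical values.
//
//   ST.Put(A, B);   // A was simplified to B
//   ST.Put(B, C);   // later, B itself was simplified to C
//   ST.Get(A);      // returns C, the current representative of A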
3552 
3553 /// A helper class for combining addressing modes.
3554 class AddressingModeCombiner {
3555  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3556  typedef std::pair<PHINode *, PHINode *> PHIPair;
3557 
3558 private:
3559  /// The addressing modes we've collected.
3560  SmallVector<ExtAddrMode, 16> AddrModes;
3561 
3562  /// The field in which the AddrModes differ, when we have more than one.
3563  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3564 
3565  /// Are the AddrModes that we have all just equal to their original values?
3566  bool AllAddrModesTrivial = true;
3567 
3568  /// Common Type for all different fields in addressing modes.
3569  Type *CommonType = nullptr;
3570 
3571  /// SimplifyQuery for simplifyInstruction utility.
3572  const SimplifyQuery &SQ;
3573 
3574  /// Original Address.
3575  Value *Original;
3576 
3577 public:
3578  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3579  : SQ(_SQ), Original(OriginalValue) {}
3580 
3581  /// Get the combined AddrMode
3582  const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
3583 
3584  /// Add a new AddrMode if it's compatible with the AddrModes we already
3585  /// have.
3586  /// \return True iff we succeeded in doing so.
3587  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3588  // Take note of if we have any non-trivial AddrModes, as we need to detect
3589  // when all AddrModes are trivial as then we would introduce a phi or select
3590  // which just duplicates what's already there.
3591  AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3592 
3593  // If this is the first addrmode then everything is fine.
3594  if (AddrModes.empty()) {
3595  AddrModes.emplace_back(NewAddrMode);
3596  return true;
3597  }
3598 
3599  // Figure out how different this is from the other address modes, which we
3600  // can do just by comparing against the first one given that we only care
3601  // about the cumulative difference.
3602  ExtAddrMode::FieldName ThisDifferentField =
3603  AddrModes[0].compare(NewAddrMode);
3604  if (DifferentField == ExtAddrMode::NoField)
3605  DifferentField = ThisDifferentField;
3606  else if (DifferentField != ThisDifferentField)
3607  DifferentField = ExtAddrMode::MultipleFields;
3608 
3609  // If NewAddrMode differs in more than one dimension we cannot handle it.
3610  bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3611 
3612  // If Scale Field is different then we reject.
3613  CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3614 
3615  // We also must reject the case when the base offset is different and the
3616  // scale reg is not null; we cannot handle this case because a merge of
3617  // the different offsets would have to be used as the ScaleReg.
3618  CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3619  !NewAddrMode.ScaledReg);
3620 
3621  // We also must reject the case when the GV is different and a BaseReg is
3622  // set, because we want to use the base reg as a merge of the GV values.
3623  CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3624  !NewAddrMode.HasBaseReg);
3625 
3626  // Even if NewAddrMode is the same we still need to collect it because the
3627  // original value is different. Later we will need all original values
3628  // as anchors when finding the common Phi node.
3629  if (CanHandle)
3630  AddrModes.emplace_back(NewAddrMode);
3631  else
3632  AddrModes.clear();
3633 
3634  return CanHandle;
3635  }
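// Illustrative sketch (not part of the original source): addNewAddrMode keeps
// collecting modes only while every collected mode differs from the first one
// in at most a single, mergeable field. With two hypothetical incoming modes
//
//   AM0: BaseReg = %p1, BaseOffs = 40, Scale = 0
//   AM1: BaseReg = %p2, BaseOffs = 40, Scale = 0
//
// only BaseRegField differs, so both are kept and can later be merged by a
// phi of %p1 and %p2. Had they also differed in Scale, DifferentField would
// become MultipleFields and AddrModes would be cleared.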
3636 
3637  /// Combine the addressing modes we've collected into a single
3638  /// addressing mode.
3639  /// \return True iff we successfully combined them or we only had one so
3640  /// didn't need to combine them anyway.
3641  bool combineAddrModes() {
3642  // If we have no AddrModes then they can't be combined.
3643  if (AddrModes.size() == 0)
3644  return false;
3645 
3646  // A single AddrMode can trivially be combined.
3647  if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3648  return true;
3649 
3650  // If the AddrModes we collected are all just equal to the value they are
3651  // derived from then combining them wouldn't do anything useful.
3652  if (AllAddrModesTrivial)
3653  return false;
3654 
3655  if (!addrModeCombiningAllowed())
3656  return false;
3657 
3658  // Build a map between <original value, basic block where we saw it> to
3659  // value of base register.
3660  // Bail out if there is no common type.
3661  FoldAddrToValueMapping Map;
3662  if (!initializeMap(Map))
3663  return false;
3664 
3665  Value *CommonValue = findCommon(Map);
3666  if (CommonValue)
3667  AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3668  return CommonValue != nullptr;
3669  }
3670 
3671 private:
3672  /// Initialize Map with anchor values. For each address seen,
3673  /// we set the value of the differing field seen in this address.
3674  /// At the same time we find a common type for the differing field that we
3675  /// will use to create new Phi/Select nodes. It is kept in the CommonType field.
3676  /// Return false if no common type is found.
3677  bool initializeMap(FoldAddrToValueMapping &Map) {
3678  // Keep track of keys where the value is null. We will need to replace it
3679  // with constant null when we know the common type.
3680  SmallVector<Value *, 2> NullValue;
3681  Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3682  for (auto &AM : AddrModes) {
3683  Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3684  if (DV) {
3685  auto *Type = DV->getType();
3686  if (CommonType && CommonType != Type)
3687  return false;
3688  CommonType = Type;
3689  Map[AM.OriginalValue] = DV;
3690  } else {
3691  NullValue.push_back(AM.OriginalValue);
3692  }
3693  }
3694  assert(CommonType && "At least one non-null value must be!");
3695  for (auto *V : NullValue)
3696  Map[V] = Constant::getNullValue(CommonType);
3697  return true;
3698  }
3699 
3700  /// We have a mapping between value A and another value B where B was a field in
3701  /// the addressing mode represented by A. We also have an original value C
3702  /// representing the address we start with. Traversing from C through phis and
3703  /// selects, we ended up with the A's in the map. This utility function tries to
3704  /// find a value V which is a field in the addressing mode of C such that, by
3705  /// traversing through phi nodes and selects, we end up at the corresponding
3706  /// values B in the map. The utility creates new Phi/Select nodes if needed.
3707  // The simple example looks as follows:
3708  // BB1:
3709  // p1 = b1 + 40
3710  // br cond BB2, BB3
3711  // BB2:
3712  // p2 = b2 + 40
3713  // br BB3
3714  // BB3:
3715  // p = phi [p1, BB1], [p2, BB2]
3716  // v = load p
3717  // Map is
3718  // p1 -> b1
3719  // p2 -> b2
3720  // Request is
3721  // p -> ?
3722  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3723  Value *findCommon(FoldAddrToValueMapping &Map) {
3724  // Tracks the simplification of newly created phi nodes. The reason we use
3725  // this mapping is because we will add new created Phi nodes in AddrToBase.
3726  // Simplification of Phi nodes is recursive, so some Phi node may
3727  // be simplified after we added it to AddrToBase. In reality this
3728  // simplification is possible only if original phi/selects were not
3729  // simplified yet.
3730  // Using this mapping we can find the current value in AddrToBase.
3731  SimplificationTracker ST(SQ);
3732 
3733  // First step, DFS to create PHI nodes for all intermediate blocks.
3734  // Also fill traverse order for the second step.
3735  SmallVector<Value *, 32> TraverseOrder;
3736  InsertPlaceholders(Map, TraverseOrder, ST);
3737 
3738  // Second Step, fill new nodes by merged values and simplify if possible.
3739  FillPlaceholders(Map, TraverseOrder, ST);
3740 
3741  if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3742  ST.destroyNewNodes(CommonType);
3743  return nullptr;
3744  }
3745 
3746  // Now we'd like to match the new Phi nodes to existing ones.
3747  unsigned PhiNotMatchedCount = 0;
3748  if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3749  ST.destroyNewNodes(CommonType);
3750  return nullptr;
3751  }
3752 
3753  auto *Result = ST.Get(Map.find(Original)->second);
3754  if (Result) {
3755  NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3756  NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3757  }
3758  return Result;
3759  }
3760 
3761  /// Try to match PHI node to Candidate.
3762  /// Matcher tracks the matched Phi nodes.
3763  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3764  SmallSetVector<PHIPair, 8> &Matcher,
3765  PhiNodeSet &PhiNodesToMatch) {
3766  SmallVector<PHIPair, 8> WorkList;
3767  Matcher.insert({PHI, Candidate});
3768  SmallSet<PHINode *, 8> MatchedPHIs;
3769  MatchedPHIs.insert(PHI);
3770  WorkList.push_back({PHI, Candidate});
3771  SmallSet<PHIPair, 8> Visited;
3772  while (!WorkList.empty()) {
3773  auto Item = WorkList.pop_back_val();
3774  if (!Visited.insert(Item).second)
3775  continue;
3776  // We iterate over all incoming values of the Phi to compare them.
3777  // If the values are different, both of them are Phis, the first one is a
3778  // Phi we added (subject to match), and both of them are in the same basic
3779  // block, then we can match our pair if the values match. So we state that
3780  // these values match and add them to the work list to verify that.
3781  for (auto *B : Item.first->blocks()) {
3782  Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3783  Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3784  if (FirstValue == SecondValue)
3785  continue;
3786 
3787  PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3788  PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3789 
3790  // If one of them is not a Phi, or
3791  // the first one is not a Phi node from the set we'd like to match, or
3792  // the Phi nodes are from different basic blocks, then
3793  // we will not be able to match.
3794  if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3795  FirstPhi->getParent() != SecondPhi->getParent())
3796  return false;
3797 
3798  // If we already matched them then continue.
3799  if (Matcher.count({FirstPhi, SecondPhi}))
3800  continue;
3801  // So the values are different and do not match. So we need them to
3802  // match. (But we register no more than one match per PHI node, so that
3803  // we won't later try to replace them twice.)
3804  if (MatchedPHIs.insert(FirstPhi).second)
3805  Matcher.insert({FirstPhi, SecondPhi});
3806  // But we must check it.
3807  WorkList.push_back({FirstPhi, SecondPhi});
3808  }
3809  }
3810  return true;
3811  }
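// Illustrative sketch (not part of the original source): MatchPhiNode treats
// two phis as equivalent when their incoming values agree block by block,
// either directly or through further phi pairs that are themselves matched.
// With hypothetical IR
//
//   %sunk = phi ptr [ %b1, %BB1 ], [ %b2, %BB2 ]   ; newly created phi
//   %old  = phi ptr [ %b1, %BB1 ], [ %b2, %BB2 ]   ; pre-existing phi
//
// the pair matches immediately, so ReplacePhi can drop %sunk in favor of %old.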
3812 
3813  /// For the given set of PHI nodes (in the SimplificationTracker) try
3814  /// to find their equivalents.
3815  /// Returns false if this matching fails and creation of new Phi is disabled.
3816  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3817  unsigned &PhiNotMatchedCount) {
3818  // Matched and PhiNodesToMatch iterate their elements in a deterministic
3819  // order, so the replacements (ReplacePhi) are also done in a deterministic
3820  // order.
3821  SmallSetVector<PHIPair, 8> Matched;
3822  SmallPtrSet<PHINode *, 8> WillNotMatch;
3823  PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3824  while (PhiNodesToMatch.size()) {
3825  PHINode *PHI = *PhiNodesToMatch.begin();
3826 
3827  // Add ourselves: if no Phi node in the basic block matches, we do not match.
3828  WillNotMatch.clear();
3829  WillNotMatch.insert(PHI);
3830 
3831  // Traverse all Phis until we find an equivalent one or fail to do so.
3832  bool IsMatched = false;
3833  for (auto &P : PHI->getParent()->phis()) {
3834  // Skip new Phi nodes.
3835  if (PhiNodesToMatch.count(&P))
3836  continue;
3837  if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
3838  break;
3839  // If it does not match, collect all Phi nodes from the matcher;
3840  // if we end up with no match, then all these Phi nodes will not match
3841  // later.
3842  for (auto M : Matched)
3843  WillNotMatch.insert(M.first);
3844  Matched.clear();
3845  }
3846  if (IsMatched) {
3847  // Replace all matched values and erase them.
3848  for (auto MV : Matched)
3849  ST.ReplacePhi(MV.first, MV.second);
3850  Matched.clear();
3851  continue;
3852  }
3853  // If we are not allowed to create new nodes then bail out.
3854  if (!AllowNewPhiNodes)
3855  return false;
3856  // Just remove all seen values in matcher. They will not match anything.
3857  PhiNotMatchedCount += WillNotMatch.size();
3858  for (auto *P : WillNotMatch)
3859  PhiNodesToMatch.erase(P);
3860  }
3861  return true;
3862  }
3863  /// Fill the placeholders with values from predecessors and simplify them.
3864  void FillPlaceholders(FoldAddrToValueMapping &Map,
3865  SmallVectorImpl<Value *> &TraverseOrder,
3866  SimplificationTracker &ST) {
3867  while (!TraverseOrder.empty()) {
3868  Value *Current = TraverseOrder.pop_back_val();
3869  assert(Map.find(Current) != Map.end() && "No node to fill!!!");
3870  Value *V = Map[Current];
3871 
3872  if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
3873  // CurrentValue also must be Select.
3874  auto *CurrentSelect = cast<SelectInst>(Current);
3875  auto *TrueValue = CurrentSelect->getTrueValue();
3876  assert(Map.find(TrueValue) != Map.end() && "No True Value!");
3877  Select->setTrueValue(ST.Get(Map[TrueValue]));
3878  auto *FalseValue = CurrentSelect->getFalseValue();
3879  assert(Map.find(FalseValue) != Map.end() && "No False Value!");
3880  Select->setFalseValue(ST.Get(Map[FalseValue]));
3881  } else {
3882  // Must be a Phi node then.
3883  auto *PHI = cast<PHINode>(V);
3884  // Fill the Phi node with values from predecessors.
3885  for (auto *B : predecessors(PHI->getParent())) {
3886  Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
3887  assert(Map.find(PV) != Map.end() && "No predecessor Value!");
3888  PHI->addIncoming(ST.Get(Map[PV]), B);
3889  }
3890  }
3891  Map[Current] = ST.Simplify(V);
3892  }
3893  }
3894 
3895  /// Starting from the original value, recursively iterates over the def-use
3896  /// chain up to known ending values represented in a map. For each traversed
3897  /// phi/select, inserts a placeholder Phi or Select.
3898  /// Reports all newly created Phi/Select nodes by adding them to the set.
3899  /// Also reports the order in which the values have been traversed.
3900  void InsertPlaceholders(FoldAddrToValueMapping &Map,
3901  SmallVectorImpl<Value *> &TraverseOrder,
3902  SimplificationTracker &ST) {
3903  SmallVector<Value *, 32> Worklist;
3904  assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
3905  "Address must be a Phi or Select node");
3906  auto *Dummy = PoisonValue::get(CommonType);
3907  Worklist.push_back(Original);
3908  while (!Worklist.empty()) {
3909  Value *Current = Worklist.pop_back_val();
3910  // if it is already visited or it is an ending value then skip it.
3911  if (Map.find(Current) != Map.end())
3912  continue;
3913  TraverseOrder.push_back(Current);
3914 
3915  // CurrentValue must be a Phi node or select. All others must be covered
3916  // by anchors.
3917  if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
3918  // Is it OK to get metadata from OrigSelect?!
3919  // Create a Select placeholder with dummy value.
3920  SelectInst *Select = SelectInst::Create(
3921  CurrentSelect->getCondition(), Dummy, Dummy,
3922  CurrentSelect->getName(), CurrentSelect, CurrentSelect);
3923  Map[Current] = Select;
3924  ST.insertNewSelect(Select);
3925  // We are interested in True and False values.
3926  Worklist.push_back(CurrentSelect->getTrueValue());
3927  Worklist.push_back(CurrentSelect->getFalseValue());
3928  } else {
3929  // It must be a Phi node then.
3930  PHINode *CurrentPhi = cast<PHINode>(Current);
3931  unsigned PredCount = CurrentPhi->getNumIncomingValues();
3932  PHINode *PHI =
3933  PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
3934  Map[Current] = PHI;
3935  ST.insertNewPhi(PHI);
3936  append_range(Worklist, CurrentPhi->incoming_values());
3937  }
3938  }
3939  }
3940 
3941  bool addrModeCombiningAllowed() {
3942  if (DisableComplexAddrModes)
3943  return false;
3944  switch (DifferentField) {
3945  default:
3946  return false;
3947  case ExtAddrMode::BaseRegField:
3948  return AddrSinkCombineBaseReg;
3949  case ExtAddrMode::BaseGVField:
3950  return AddrSinkCombineBaseGV;
3951  case ExtAddrMode::BaseOffsField:
3952  return AddrSinkCombineBaseOffs;
3953  case ExtAddrMode::ScaledRegField:
3954  return AddrSinkCombineScaledReg;
3955  }
3956  }
3957 };
3958 } // end anonymous namespace
3959 
3960 /// Try adding ScaleReg*Scale to the current addressing mode.
3961 /// Return true and update AddrMode if this addr mode is legal for the target,
3962 /// false if not.
3963 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3964  unsigned Depth) {
3965  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3966  // mode. Just process that directly.
3967  if (Scale == 1)
3968  return matchAddr(ScaleReg, Depth);
3969 
3970  // If the scale is 0, it takes nothing to add this.
3971  if (Scale == 0)
3972  return true;
3973 
3974  // If we already have a scale of this value, we can add to it, otherwise, we
3975  // need an available scale field.
3976  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3977  return false;
3978 
3979  ExtAddrMode TestAddrMode = AddrMode;
3980 
3981  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
3982  // [A+B + A*7] -> [B+A*8].
3983  TestAddrMode.Scale += Scale;
3984  TestAddrMode.ScaledReg = ScaleReg;
3985 
3986  // If the new address isn't legal, bail out.
3987  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3988  return false;
3989 
3990  // It was legal, so commit it.
3991  AddrMode = TestAddrMode;
3992 
3993  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
3994  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
3995  // X*Scale + C*Scale to the addr mode. If we found an available IV increment, do not
3996  // go any further: we can reuse it and cannot eliminate it.
3997  ConstantInt *CI = nullptr;
3998  Value *AddLHS = nullptr;
3999  if (isa<Instruction>(ScaleReg) && // not a constant expr.
4000  match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4001  !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4002  TestAddrMode.InBounds = false;
4003  TestAddrMode.ScaledReg = AddLHS;
4004  TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4005 
4006  // If this addressing mode is legal, commit it and remember that we folded
4007  // this instruction.
4008  if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4009  AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4010  AddrMode = TestAddrMode;
4011  return true;
4012  }
4013  // Restore status quo.
4014  TestAddrMode = AddrMode;
4015  }
4016 
4017  // If this is an add recurrence with a constant step, return the increment
4018  // instruction and the canonicalized step.
4019  auto GetConstantStep =
4020  [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4021  auto *PN = dyn_cast<PHINode>(V);
4022  if (!PN)
4023  return None;
4024  auto IVInc = getIVIncrement(PN, &LI);
4025  if (!IVInc)
4026  return None;
4027  // TODO: The result of the intrinsics above is two's complement. However, when
4028  // the IV inc is expressed as add or sub, iv.next is potentially a poison value.
4029  // If it has nuw or nsw flags, we need to make sure that these flags are
4030  // inferrable at the point of the memory instruction. Otherwise we are replacing
4031  // a well-defined two's complement computation with poison. Currently, to avoid
4032  // the potentially complex analysis needed to prove this, we reject such cases.
4033  if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4034  if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4035  return None;
4036  if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4037  return std::make_pair(IVInc->first, ConstantStep->getValue());
4038  return None;
4039  };
4040 
4041  // Try to account for the following special case:
4042  // 1. ScaleReg is an induction variable;
4043  // 2. We use it with non-zero offset;
4044  // 3. IV's increment is available at the point of memory instruction.
4045  //
4046  // In this case, we may reuse the IV increment instead of the IV Phi to
4047  // achieve the following advantages:
4048  // 1. If IV step matches the offset, we will have no need in the offset;
4049  // 2. Even if they don't match, we will reduce the overlap of living IV
4050  // and IV increment, that will potentially lead to better register
4051  // assignment.
4052  if (AddrMode.BaseOffs) {
4053  if (auto IVStep = GetConstantStep(ScaleReg)) {
4054  Instruction *IVInc = IVStep->first;
4055  // The following assert is important to ensure a lack of infinite loops.
4056  // This transform is (intentionally) the inverse of the one just above.
4057  // If they don't agree on the definition of an increment, we'd alternate
4058  // back and forth indefinitely.
4059  assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4060  APInt Step = IVStep->second;
4061  APInt Offset = Step * AddrMode.Scale;
4062  if (Offset.isSignedIntN(64)) {
4063  TestAddrMode.InBounds = false;
4064  TestAddrMode.ScaledReg = IVInc;
4065  TestAddrMode.BaseOffs -= Offset.getLimitedValue();
4066  // If this addressing mode is legal, commit it..
4067  // (Note that we defer the (expensive) domtree base legality check
4068  // to the very last possible point.)
4069  if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4070  getDTFn().dominates(IVInc, MemoryInst)) {
4071  AddrModeInsts.push_back(cast<Instruction>(IVInc));
4072  AddrMode = TestAddrMode;
4073  return true;
4074  }
4075  // Restore status quo.
4076  TestAddrMode = AddrMode;
4077  }
4078  }
4079  }
4080 
4081  // Otherwise, just return what we have.
4082  return true;
4083 }
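// Illustrative sketch (not part of the original source): the ScaleReg == X + C
// special case above folds the constant into the offset. For a scale of 4
// (hypothetical IR)
//
//   %idx  = add i64 %x, 3
//   %addr = getelementptr i32, ptr %base, i64 %idx   ; %base + 4 * (%x + 3)
//
// the matcher tries the mode { BaseReg = %base, ScaledReg = %x, Scale = 4,
// BaseOffs = 12 } and commits it only if the target reports it as legal.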
4084 
4085 /// This is a little filter, which returns true if an addressing computation
4086 /// involving I might be folded into a load/store accessing it.
4087 /// This doesn't need to be perfect, but needs to accept at least
4088 /// the set of instructions that MatchOperationAddr can.
4089 static bool MightBeFoldableInst(Instruction *I) {
4090  switch (I->getOpcode()) {
4091  case Instruction::BitCast:
4092  case Instruction::AddrSpaceCast:
4093  // Don't touch identity bitcasts.
4094  if (I->getType() == I->getOperand(0)->getType())
4095  return false;
4096  return I->getType()->isIntOrPtrTy();
4097  case Instruction::PtrToInt:
4098  // PtrToInt is always a noop, as we know that the int type is pointer sized.
4099  return true;
4100  case Instruction::IntToPtr:
4101  // We know the input is intptr_t, so this is foldable.
4102  return true;
4103  case Instruction::Add:
4104  return true;
4105  case Instruction::Mul:
4106  case Instruction::Shl:
4107  // Can only handle X*C and X << C.
4108  return isa<ConstantInt>(I->getOperand(1));
4109  case Instruction::GetElementPtr:
4110  return true;
4111  default:
4112  return false;
4113  }
4114 }
4115 
4116 /// Check whether or not \p Val is a legal instruction for \p TLI.
4117 /// \note \p Val is assumed to be the product of some type promotion.
4118 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4119 /// to be legal, as the non-promoted value would have had the same state.
4120 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4121  const DataLayout &DL, Value *Val) {
4122  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4123  if (!PromotedInst)
4124  return false;
4125  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4126  // If the ISDOpcode is undefined, it was undefined before the promotion.
4127  if (!ISDOpcode)
4128  return true;
4129  // Otherwise, check if the promoted instruction is legal or not.
4130  return TLI.isOperationLegalOrCustom(
4131  ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4132 }
4133 
4134 namespace {
4135 
4136 /// Helper class to perform type promotion.
4137 class TypePromotionHelper {
4138  /// Utility function to add a promoted instruction \p ExtOpnd to
4139  /// \p PromotedInsts and record the type of extension we have seen.
4140  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4141  Instruction *ExtOpnd, bool IsSExt) {
4142  ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4143  InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4144  if (It != PromotedInsts.end()) {
4145  // If the new extension is same as original, the information in
4146  // PromotedInsts[ExtOpnd] is still correct.
4147  if (It->second.getInt() == ExtTy)
4148  return;
4149 
4150  // Now the new extension is different from old extension, we make
4151  // the type information invalid by setting extension type to
4152  // BothExtension.
4153  ExtTy = BothExtension;
4154  }
4155  PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4156  }
4157 
4158  /// Utility function to query the original type of instruction \p Opnd
4159  /// with a matched extension type. If the extension doesn't match, we
4160  /// cannot use the information we had on the original type.
4161  /// BothExtension doesn't match any extension type.
4162  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4163  Instruction *Opnd, bool IsSExt) {
4164  ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4165  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4166  if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4167  return It->second.getPointer();
4168  return nullptr;
4169  }
4170 
4171  /// Utility function to check whether or not a sign or zero extension
4172  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4173  /// either using the operands of \p Inst or promoting \p Inst.
4174  /// The type of the extension is defined by \p IsSExt.
4175  /// In other words, check if:
4176  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4177  /// #1 Promotion applies:
4178  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4179  /// #2 Operand reuses:
4180  /// ext opnd1 to ConsideredExtType.
4181  /// \p PromotedInsts maps the instructions to their type before promotion.
4182  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4183  const InstrToOrigTy &PromotedInsts, bool IsSExt);
4184 
4185  /// Utility function to determine if \p OpIdx should be promoted when
4186  /// promoting \p Inst.
4187  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4188  return !(isa<SelectInst>(Inst) && OpIdx == 0);
4189  }
4190 
4191  /// Utility function to promote the operand of \p Ext when this
4192  /// operand is a promotable trunc or sext or zext.
4193  /// \p PromotedInsts maps the instructions to their type before promotion.
4194  /// \p CreatedInstsCost[out] contains the cost of all instructions
4195  /// created to promote the operand of Ext.
4196  /// Newly added extensions are inserted in \p Exts.
4197  /// Newly added truncates are inserted in \p Truncs.
4198  /// Should never be called directly.
4199  /// \return The promoted value which is used instead of Ext.
4200  static Value *promoteOperandForTruncAndAnyExt(
4201  Instruction *Ext, TypePromotionTransaction &TPT,
4202  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4203  SmallVectorImpl<Instruction *> *Exts,
4204  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4205 
4206  /// Utility function to promote the operand of \p Ext when this
4207  /// operand is promotable and is not a supported trunc or sext.
4208  /// \p PromotedInsts maps the instructions to their type before promotion.
4209  /// \p CreatedInstsCost[out] contains the cost of all the instructions
4210  /// created to promote the operand of Ext.
4211  /// Newly added extensions are inserted in \p Exts.
4212  /// Newly added truncates are inserted in \p Truncs.
4213  /// Should never be called directly.
4214  /// \return The promoted value which is used instead of Ext.
4215  static Value *promoteOperandForOther(Instruction *Ext,
4216  TypePromotionTransaction &TPT,
4217  InstrToOrigTy &PromotedInsts,
4218  unsigned &CreatedInstsCost,
4219  SmallVectorImpl<Instruction *> *Exts,
4220  SmallVectorImpl<Instruction *> *Truncs,
4221  const TargetLowering &TLI, bool IsSExt);
4222 
4223  /// \see promoteOperandForOther.
4224  static Value *signExtendOperandForOther(
4225  Instruction *Ext, TypePromotionTransaction &TPT,
4226  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4227  SmallVectorImpl<Instruction *> *Exts,
4228  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4229  return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4230  Exts, Truncs, TLI, true);
4231  }
4232 
4233  /// \see promoteOperandForOther.
4234  static Value *zeroExtendOperandForOther(
4235  Instruction *Ext, TypePromotionTransaction &TPT,
4236  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4237  SmallVectorImpl<Instruction *> *Exts,
4238  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4239  return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4240  Exts, Truncs, TLI, false);
4241  }
4242 
4243 public:
4244  /// Type for the utility function that promotes the operand of Ext.
4245  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4246  InstrToOrigTy &PromotedInsts,
4247  unsigned &CreatedInstsCost,
4248  SmallVectorImpl<Instruction *> *Exts,
4249  SmallVectorImpl<Instruction *> *Truncs,
4250  const TargetLowering &TLI);
4251 
4252  /// Given a sign/zero extend instruction \p Ext, return the appropriate
4253  /// action to promote the operand of \p Ext instead of using Ext.
4254  /// \return NULL if no promotable action is possible with the current
4255  /// sign extension.
4256  /// \p InsertedInsts keeps track of all the instructions inserted by the
4257  /// other CodeGenPrepare optimizations. This information is important
4258  /// because we do not want to promote these instructions as CodeGenPrepare
4259  /// will reinsert them later. Thus creating an infinite loop: create/remove.
4260  /// \p PromotedInsts maps the instructions to their type before promotion.
4261  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4262  const TargetLowering &TLI,
4263  const InstrToOrigTy &PromotedInsts);
4264 };
4265 
4266 } // end anonymous namespace
4267 
4268 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4269  Type *ConsideredExtType,
4270  const InstrToOrigTy &PromotedInsts,
4271  bool IsSExt) {
4272  // The promotion helper does not know how to deal with vector types yet.
4273  // To be able to fix that, we would need to fix the places where we
4274  // statically extend, e.g., constants and such.
4275  if (Inst->getType()->isVectorTy())
4276  return false;
4277 
4278  // We can always get through zext.
4279  if (isa<ZExtInst>(Inst))
4280  return true;
4281 
4282  // sext(sext) is ok too.
4283  if (IsSExt && isa<SExtInst>(Inst))
4284  return true;
4285 
4286  // We can get through binary operator, if it is legal. In other words, the
4287  // binary operator must have a nuw or nsw flag.
4288  if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4289  if (isa<OverflowingBinaryOperator>(BinOp) &&
4290  ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4291  (IsSExt && BinOp->hasNoSignedWrap())))
4292  return true;
4293 
4294  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4295  if ((Inst->getOpcode() == Instruction::And ||
4296  Inst->getOpcode() == Instruction::Or))
4297  return true;
4298 
4299  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4300  if (Inst->getOpcode() == Instruction::Xor) {
4301  // Make sure it is not a NOT.
4302  if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4303  if (!Cst->getValue().isAllOnes())
4304  return true;
4305  }
4306 
4307  // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4308  // It may change a poisoned value into a regular value, like
4309  // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12
4310  // poisoned value regular value
4311  // It should be OK since undef covers valid value.
4312  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4313  return true;
4314 
4315  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4316  // It may change a poisoned value into a regular value, like
4317  // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
4318  // poisoned value regular value
4319  // It should be OK since undef covers valid value.
4320  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4321  const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4322  if (ExtInst->hasOneUse()) {
4323  const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4324  if (AndInst && AndInst->getOpcode() == Instruction::And) {
4325  const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4326  if (Cst &&
4327  Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4328  return true;
4329  }
4330  }
4331  }
4332 
4333  // Check if we can do the following simplification.
4334  // ext(trunc(opnd)) --> ext(opnd)
4335  if (!isa<TruncInst>(Inst))
4336  return false;
4337 
4338  Value *OpndVal = Inst->getOperand(0);
4339  // Check if we can use this operand in the extension.
4340  // If the type is larger than the result type of the extension, we cannot.
4341  if (!OpndVal->getType()->isIntegerTy() ||
4342  OpndVal->getType()->getIntegerBitWidth() >
4343  ConsideredExtType->getIntegerBitWidth())
4344  return false;
4345 
4346  // If the operand of the truncate is not an instruction, we will not have
4347  // any information on the dropped bits.
4348  // (Actually we could for constant but it is not worth the extra logic).
4349  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4350  if (!Opnd)
4351  return false;
4352 
4353  // Check if the source of the type is narrow enough.
4354  // I.e., check that trunc just drops extended bits of the same kind of
4355  // the extension.
4356  // #1 get the type of the operand and check the kind of the extended bits.
4357  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4358  if (OpndType)
4359  ;
4360  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4361  OpndType = Opnd->getOperand(0)->getType();
4362  else
4363  return false;
4364 
4365  // #2 check that the truncate just drops extended bits.
4366  return Inst->getType()->getIntegerBitWidth() >=
4367  OpndType->getIntegerBitWidth();
4368 }
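// Illustrative sketch (not part of the original source): one pattern that
// canGetThrough accepts is an extension of a truncate that only drops bits
// which were extended with the same kind of extension (hypothetical IR):
//
//   %w = sext i8 %v to i32
//   %t = trunc i32 %w to i16
//   %e = sext i16 %t to i64
//
// The recorded original type behind %t is i8, so the truncate merely drops
// sign-extended bits and the final sext may be moved through it.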
4369 
4370 TypePromotionHelper::Action TypePromotionHelper::getAction(
4371  Instruction *Ext, const SetOfInstrs &InsertedInsts,
4372  const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4373  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4374  "Unexpected instruction type");
4375  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4376  Type *ExtTy = Ext->getType();
4377  bool IsSExt = isa<SExtInst>(Ext);
4378  // If the operand of the extension is not an instruction, we cannot
4379  // get through.
4380  // If it is, check whether we can get through it.
4381  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4382  return nullptr;
4383 
4384  // Do not promote if the operand has been added by codegenprepare.
4385  // Otherwise, it means we are undoing an optimization that is likely to be
4386  // redone, thus causing potential infinite loop.
4387  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4388  return nullptr;
4389 
4390  // SExt or Trunc instructions.
4391  // Return the related handler.
4392  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4393  isa<ZExtInst>(ExtOpnd))
4394  return promoteOperandForTruncAndAnyExt;
4395 
4396  // Regular instruction.
4397  // Abort early if we will have to insert non-free instructions.
4398  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4399  return nullptr;
4400  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4401 }
4402 
4403 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4404  Instruction *SExt, TypePromotionTransaction &TPT,
4405  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4406  SmallVectorImpl<Instruction *> *Exts,
4407  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4408  // By construction, the operand of SExt is an instruction. Otherwise we cannot
4409  // get through it and this method should not be called.
4410  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4411  Value *ExtVal = SExt;
4412  bool HasMergedNonFreeExt = false;
4413  if (isa<ZExtInst>(SExtOpnd)) {
4414  // Replace s|zext(zext(opnd))
4415  // => zext(opnd).
4416  HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4417  Value *ZExt =
4418  TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4419  TPT.replaceAllUsesWith(SExt, ZExt);
4420  TPT.eraseInstruction(SExt);
4421  ExtVal = ZExt;
4422  } else {
4423  // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4424  // => z|sext(opnd).
4425  TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4426  }
4427  CreatedInstsCost = 0;
4428 
4429  // Remove dead code.
4430  if (SExtOpnd->use_empty())
4431  TPT.eraseInstruction(SExtOpnd);
4432 
4433  // Check if the extension is still needed.
4434  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4435  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4436  if (ExtInst) {
4437  if (Exts)
4438  Exts->push_back(ExtInst);
4439  CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4440  }
4441  return ExtVal;
4442  }
4443 
4444  // At this point we have: ext ty opnd to ty.
4445  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4446  Value *NextVal = ExtInst->getOperand(0);
4447  TPT.eraseInstruction(ExtInst, NextVal);
4448  return NextVal;
4449 }
4450 
4451 Value *TypePromotionHelper::promoteOperandForOther(
4452  Instruction *Ext, TypePromotionTransaction &TPT,
4453  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4454  SmallVectorImpl<Instruction *> *Exts,
4455  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4456  bool IsSExt) {
4457  // By construction, the operand of Ext is an instruction. Otherwise we cannot
4458  // get through it and this method should not be called.
4459  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4460  CreatedInstsCost = 0;
4461  if (!ExtOpnd->hasOneUse()) {
4462  // ExtOpnd will be promoted.
4463  // All its uses, but Ext, will need to use a truncated value of the
4464  // promoted version.
4465  // Create the truncate now.
4466  Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4467  if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4468  // Insert it just after the definition.
4469  ITrunc->moveAfter(ExtOpnd);
4470  if (Truncs)
4471  Truncs->push_back(ITrunc);
4472  }
4473 
4474  TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4475  // Restore the operand of Ext (which has been replaced by the previous call
4476  // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4477  TPT.setOperand(Ext, 0, ExtOpnd);
4478  }
4479 
4480  // Get through the Instruction:
4481  // 1. Update its type.
4482  // 2. Replace the uses of Ext by Inst.
4483  // 3. Extend each operand that needs to be extended.
4484 
4485  // Remember the original type of the instruction before promotion.
4486  // This is useful to know that the high bits are sign extended bits.
4487  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4488  // Step #1.
4489  TPT.mutateType(ExtOpnd, Ext->getType());
4490  // Step #2.
4491  TPT.replaceAllUsesWith(Ext, ExtOpnd);
4492  // Step #3.
4493  Instruction *ExtForOpnd = Ext;
4494 
4495  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4496  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4497  ++OpIdx) {
4498  LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4499  if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4500  !shouldExtOperand(ExtOpnd, OpIdx)) {
4501  LLVM_DEBUG(dbgs() << "No need to propagate\n");
4502  continue;
4503  }
4504  // Check if we can statically extend the operand.
4505  Value *Opnd = ExtOpnd->getOperand(OpIdx);
4506  if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4507  LLVM_DEBUG(dbgs() << "Statically extend\n");
4508  unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4509  APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4510  : Cst->getValue().zext(BitWidth);
4511  TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4512  continue;
4513  }
4514  // UndefValues are typed, so we have to statically sign extend them.
4515  if (isa<UndefValue>(Opnd)) {
4516  LLVM_DEBUG(dbgs() << "Statically extend\n");
4517  TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4518  continue;
4519  }
4520 
4521  // Otherwise we have to explicitly sign extend the operand.
4522  // Check if Ext was reused to extend an operand.
4523  if (!ExtForOpnd) {
4524  // If yes, create a new one.
4525  LLVM_DEBUG(dbgs() << "More operands to ext\n");
4526  Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
4527  : TPT.createZExt(Ext, Opnd, Ext->getType());
4528  if (!isa<Instruction>(ValForExtOpnd)) {
4529  TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4530  continue;
4531  }
4532  ExtForOpnd = cast<Instruction>(ValForExtOpnd);
4533  }
4534  if (Exts)
4535  Exts->push_back(ExtForOpnd);
4536  TPT.setOperand(ExtForOpnd, 0, Opnd);
4537 
4538  // Move the sign extension before the insertion point.
4539  TPT.moveBefore(ExtForOpnd, ExtOpnd);
4540  TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
4541  CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
4542  // If more extensions are required, new instructions will have to be created.
4543  ExtForOpnd = nullptr;
4544  }
4545  if (ExtForOpnd == Ext) {
4546  LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4547  TPT.eraseInstruction(Ext);
4548  }
4549  return ExtOpnd;
4550 }
4551 
4552 /// Check whether or not promoting an instruction to a wider type is profitable.
4553 /// \p NewCost gives the cost of extension instructions created by the
4554 /// promotion.
4555 /// \p OldCost gives the cost of extension instructions before the promotion
4556 /// plus the number of instructions that have been
4557 /// matched in the addressing mode because of the promotion.
4558 /// \p PromotedOperand is the value that has been promoted.
4559 /// \return True if the promotion is profitable, false otherwise.
4560 bool AddressingModeMatcher::isPromotionProfitable(
4561  unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4562  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4563  << '\n');
4564  // The cost of the new extensions is greater than the cost of the
4565  // old extension plus what we folded.
4566  // This is not profitable.
4567  if (NewCost > OldCost)
4568  return false;
4569  if (NewCost < OldCost)
4570  return true;
4571  // The promotion is neutral but it may help folding the sign extension in
4572  // loads for instance.
4573  // Check that we did not create an illegal instruction.
4574  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4575 }
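// Illustrative reading of the cost model above (not part of the original
// source): if the promotion creates one non-free extension (NewCost == 1) but
// the original extension was non-free and one extra instruction got folded
// into the addressing mode (OldCost == 1 + 1 == 2), the promotion pays off.
// When the costs tie, profitability falls back to the legality check on the
// promoted instruction.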
4576 
4577 /// Given an instruction or constant expr, see if we can fold the operation
4578 /// into the addressing mode. If so, update the addressing mode and return
4579 /// true, otherwise return false without modifying AddrMode.
4580 /// If \p MovedAway is not NULL, it indicates whether or not AddrInst has to
4581 /// be folded into the addressing mode on success.
4582 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
4583 /// mode because it has been moved away.
4584 /// Thus AddrInst must not be added in the matched instructions.
4585 /// This state can happen when AddrInst is a sext, since it may be moved away.
4586 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4587 /// not be referenced anymore.
4588 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4589  unsigned Depth,
4590  bool *MovedAway) {
4591  // Avoid exponential behavior on extremely deep expression trees.
4592  if (Depth >= 5)
4593  return false;
4594 
4595  // By default, all matched instructions stay in place.
4596  if (MovedAway)
4597  *MovedAway = false;
4598 
4599  switch (Opcode) {
4600  case Instruction::PtrToInt:
4601  // PtrToInt is always a noop, as we know that the int type is pointer sized.
4602  return matchAddr(AddrInst->getOperand(0), Depth);
4603  case Instruction::IntToPtr: {
4604  auto AS = AddrInst->getType()->getPointerAddressSpace();
4605  auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4606  // This inttoptr is a no-op if the integer type is pointer sized.
4607  if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4608  return matchAddr(AddrInst->getOperand(0), Depth);
4609  return false;
4610  }
4611  case Instruction::BitCast:
4612  // BitCast is always a noop, and we can handle it as long as it is
4613  // int->int or pointer->pointer (we don't want int<->fp or something).
4614  if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4615  // Don't touch identity bitcasts. These were probably put here by LSR,
4616  // and we don't want to mess around with them. Assume it knows what it
4617  // is doing.
4618  AddrInst->getOperand(0)->getType() != AddrInst->getType())
4619  return matchAddr(AddrInst->getOperand(0), Depth);
4620  return false;
4621  case Instruction::AddrSpaceCast: {
4622  unsigned SrcAS =
4623  AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4624  unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4625  if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4626  return matchAddr(AddrInst->getOperand(0), Depth);
4627  return false;
4628  }
4629  case Instruction::Add: {
4630  // Check to see if we can merge in the RHS then the LHS. If so, we win.
4631  ExtAddrMode BackupAddrMode = AddrMode;
4632  unsigned OldSize = AddrModeInsts.size();
4633  // Start a transaction at this point.
4634  // The LHS may match but not the RHS.
4635  // Therefore, we need a higher level restoration point to undo a partially
4636  // matched operation.
4637  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4638  TPT.getRestorationPoint();
4639 
4640  AddrMode.InBounds = false;
4641  if (matchAddr(AddrInst->getOperand(1), Depth + 1) &&
4642  matchAddr(AddrInst->getOperand(0), Depth + 1))
4643  return true;
4644 
4645  // Restore the old addr mode info.
4646  AddrMode = BackupAddrMode;
4647  AddrModeInsts.resize(OldSize);
4648  TPT.rollback(LastKnownGood);
4649 
4650  // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
4651  if (matchAddr(AddrInst->getOperand(0), Depth + 1) &&
4652  matchAddr(AddrInst->getOperand(1), Depth + 1))
4653  return true;
4654 
4655  // Otherwise we definitely can't merge the ADD in.
4656  AddrMode = BackupAddrMode;
4657  AddrModeInsts.resize(OldSize);
4658  TPT.rollback(LastKnownGood);
4659  break;
4660  }
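// Illustrative note for the Add case above (not part of the original source):
// for "%a = add i64 %base, 24" used as an address, the constant 24 is folded
// into AddrMode.BaseOffs while %base typically becomes the base register; the
// transaction restoration point undoes any promotion work performed during a
// half-successful RHS/LHS match.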
4661  // case Instruction::Or:
4662  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4663  // break;
4664  case Instruction::Mul:
4665  case Instruction::Shl: {
4666  // Can only handle X*C and X << C.
4667  AddrMode.InBounds = false;
4668  ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4669  if (!RHS || RHS->getBitWidth() > 64)
4670  return false;
4671  int64_t Scale = Opcode == Instruction::Shl
4672  ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
4673  : RHS->getSExtValue();
4674 
4675  return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4676  }
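// Illustrative example for the Mul/Shl case above (not part of the original
// source): an index computed as "%idx = shl i64 %i, 3" can be matched as
// ScaledReg = %i with Scale = 8, assuming the target reports the resulting
// scaled addressing mode as legal.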
4677  case Instruction::GetElementPtr: {
4678  // Scan the GEP. We can only handle it if it contains constant offsets and
4679  // at most one variable offset.
4680  int VariableOperand = -1;
4681  unsigned VariableScale = 0;
4682 
4683  int64_t ConstantOffset = 0;
4684  gep_type_iterator GTI = gep_type_begin(AddrInst);
4685  for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4686  if (StructType *STy = GTI.getStructTypeOrNull()) {
4687  const StructLayout *SL = DL.getStructLayout(STy);
4688  unsigned Idx =
4689  cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4690  ConstantOffset += SL->getElementOffset(Idx);
4691  } else {
4692  TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType());
4693  if (TS.isNonZero()) {
4694  // The optimisations below currently only work for fixed offsets.
4695  if (TS.isScalable())
4696  return false;
4697  int64_t TypeSize = TS.getFixedSize();
4698  if (ConstantInt *CI =
4699  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4700  const APInt &CVal = CI->getValue();
4701  if (CVal.getMinSignedBits() <= 64) {
4702  ConstantOffset += CVal.getSExtValue() * TypeSize;
4703  continue;
4704  }
4705  }
4706  // We only allow one variable index at the moment.
4707  if (VariableOperand != -1)
4708  return false;
4709 
4710  // Remember the variable index.
4711  VariableOperand = i;
4712  VariableScale = TypeSize;
4713  }
4714  }
4715  }
4716 
4717  // A common case is for the GEP to only do a constant offset. In this case,
4718  // just add it to the disp field and check validity.
4719  if (VariableOperand == -1) {
4720  AddrMode.BaseOffs += ConstantOffset;
4721  if (ConstantOffset == 0 ||
4722  TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4723  // Check to see if we can fold the base pointer in too.
4724  if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4725  if (!cast<GEPOperator>(AddrInst)->isInBounds())
4726  AddrMode.InBounds = false;
4727  return true;
4728  }
4729  } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4730  TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4731  ConstantOffset > 0) {
4732  // Record GEPs with non-zero offsets as candidates for splitting in the
4733  // event that the offset cannot fit into the r+i addressing mode.
4734  // This handles the simple and common case in which only one GEP is used in
4735  // calculating the address for the memory access.
4736  Value *Base = AddrInst->getOperand(0);
4737  auto *BaseI = dyn_cast<Instruction>(Base);
4738  auto *GEP = cast<GetElementPtrInst>(AddrInst);
4739  if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4740  (BaseI && !isa<CastInst>(BaseI) &&
4741  !isa<GetElementPtrInst>(BaseI))) {
4742  // Make sure the parent block allows inserting non-PHI instructions
4743  // before the terminator.
4744  BasicBlock *Parent =
4745  BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
4746  if (!Parent->getTerminator()->isEHPad())
4747  LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4748  }
4749  }
4750  AddrMode.BaseOffs -= ConstantOffset;
4751  return false;
4752  }
4753 
4754  // Save the valid addressing mode in case we can't match.
4755  ExtAddrMode BackupAddrMode = AddrMode;
4756  unsigned OldSize = AddrModeInsts.size();
4757 
4758  // See if the scale and offset amount is valid for this target.
4759  AddrMode.BaseOffs += ConstantOffset;
4760  if (!cast<GEPOperator>(AddrInst)->isInBounds())
4761  AddrMode.InBounds = false;
4762 
4763  // Match the base operand of the GEP.
4764  if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4765  // If it couldn't be matched, just stuff the value in a register.
4766  if (AddrMode.HasBaseReg) {
4767  AddrMode = BackupAddrMode;
4768  AddrModeInsts.resize(OldSize);
4769  return false;
4770  }
4771  AddrMode.HasBaseReg = true;
4772  AddrMode.BaseReg = AddrInst->getOperand(0);
4773  }
4774 
4775  // Match the remaining variable portion of the GEP.
4776  if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4777  Depth)) {
4778  // If it couldn't be matched, try stuffing the base into a register
4779  // instead of matching it, and retrying the match of the scale.
4780  AddrMode = BackupAddrMode;
4781  AddrModeInsts.resize(OldSize);
4782  if (AddrMode.HasBaseReg)
4783  return false;
4784  AddrMode.HasBaseReg = true;
4785  AddrMode.BaseReg = AddrInst->getOperand(0);
4786  AddrMode.BaseOffs += ConstantOffset;
4787  if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4788  VariableScale, Depth)) {
4789  // If even that didn't work, bail.
4790  AddrMode = BackupAddrMode;
4791  AddrModeInsts.resize(OldSize);
4792  return false;
4793  }
4794  }
4795 
4796  return true;
4797  }
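// Illustrative example for the GetElementPtr case above (not part of the
// original source): for "getelementptr inbounds [16 x i32], ptr %p, i64 %i,
// i64 4" the matcher can end up with BaseReg = %p, ScaledReg = %i, Scale = 64
// (the stride of one [16 x i32] element) and BaseOffs = 16 (4 * sizeof(i32)),
// provided the target accepts that combination as legal.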
4798  case Instruction::SExt:
4799  case Instruction::ZExt: {
4800  Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4801  if (!Ext)
4802  return false;
4803 
4804  // Try to move this ext out of the way of the addressing mode.
4805  // Ask for a method for doing so.
4806  TypePromotionHelper::Action TPH =
4807  TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4808  if (!TPH)
4809  return false;
4810 
4811  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4812  TPT.getRestorationPoint();
4813  unsigned CreatedInstsCost = 0;
4814  unsigned ExtCost = !TLI.isExtFree(Ext);
4815  Value *PromotedOperand =
4816  TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4817  // SExt has been moved away.
4818  // Thus either it will be rematched later in the recursive calls or it is
4819  // gone. Anyway, we must not fold it into the addressing mode at this point.
4820  // E.g.,
4821  // op = add opnd, 1
4822  // idx = ext op
4823  // addr = gep base, idx
4824  // is now:
4825  // promotedOpnd = ext opnd <- no match here
4826  // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
4827  // addr = gep base, op <- match
4828  if (MovedAway)
4829  *MovedAway = true;
4830 
4831  assert(PromotedOperand &&
4832  "TypePromotionHelper should have filtered out those cases");
4833 
4834  ExtAddrMode BackupAddrMode = AddrMode;
4835  unsigned OldSize = AddrModeInsts.size();
4836 
4837  if (!matchAddr(PromotedOperand, Depth) ||
4838  // The total of the new cost is equal to the cost of the created
4839  // instructions.
4840  // The total of the old cost is equal to the cost of the extension plus
4841  // what we have saved in the addressing mode.
4842  !isPromotionProfitable(CreatedInstsCost,
4843  ExtCost + (AddrModeInsts.size() - OldSize),
4844  PromotedOperand)) {
4845  AddrMode = BackupAddrMode;
4846  AddrModeInsts.resize(OldSize);
4847  LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4848  TPT.rollback(LastKnownGood);
4849  return false;
4850  }
4851  return true;
4852  }
4853  }
4854  return false;
4855 }
4856 
4857 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4858 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4859 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4860 /// for the target.
4861 ///
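/// Illustrative behavior (not part of the original source): matching a
/// constant such as "i64 40" simply adds 40 to BaseOffs if the target still
/// considers the mode legal, while an opaque value such as a function argument
/// falls through to the [reg] and [r+r] fallbacks at the end of this function.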
4862 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4863  // Start a transaction at this point that we will rollback if the matching
4864  // fails.
4865  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4866  TPT.getRestorationPoint();
4867  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4868  if (CI->getValue().isSignedIntN(64)) {
4869  // Fold in immediates if legal for the target.
4870  AddrMode.BaseOffs += CI->getSExtValue();
4871  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4872  return true;
4873  AddrMode.BaseOffs -= CI->getSExtValue();
4874  }
4875  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4876  // If this is a global variable, try to fold it into the addressing mode.
4877  if (!AddrMode.BaseGV) {
4878  AddrMode.BaseGV = GV;
4879  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4880  return true;
4881  AddrMode.BaseGV = nullptr;
4882  }
4883  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4884  ExtAddrMode BackupAddrMode = AddrMode;
4885  unsigned OldSize = AddrModeInsts.size();
4886 
4887  // Check to see if it is possible to fold this operation.
4888  bool MovedAway = false;
4889  if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4890  // This instruction may have been moved away. If so, there is nothing
4891  // to check here.
4892  if (MovedAway)
4893  return true;
4894  // Okay, it's possible to fold this. Check to see if it is actually
4895  // *profitable* to do so. We use a simple cost model to avoid increasing
4896  // register pressure too much.
4897  if (I->hasOneUse() ||
4898  isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4899  AddrModeInsts.push_back(I);
4900  return true;
4901  }
4902 
4903  // It isn't profitable to do this, roll back.
4904  AddrMode = BackupAddrMode;
4905  AddrModeInsts.resize(OldSize);
4906  TPT.rollback(LastKnownGood);
4907  }
4908  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4909  if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4910  return true;
4911  TPT.rollback(LastKnownGood);
4912  } else if (isa<ConstantPointerNull>(Addr)) {
4913  // Null pointer gets folded without affecting the addressing mode.
4914  return true;
4915  }
4916 
4917  // Worst case, the target should support [reg] addressing modes. :)
4918  if (!AddrMode.HasBaseReg) {
4919  AddrMode.HasBaseReg = true;
4920  AddrMode.BaseReg = Addr;
4921  // Still check for legality in case the target supports [imm] but not [i+r].
4922  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4923  return true;
4924  AddrMode.HasBaseReg = false;
4925  AddrMode.BaseReg = nullptr;
4926  }
4927 
4928  // If the base register is already taken, see if we can do [r+r].
4929  if (AddrMode.Scale == 0) {
4930  AddrMode.Scale = 1;
4931  AddrMode.ScaledReg = Addr;
4932  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4933  return true;
4934  AddrMode.Scale = 0;
4935  AddrMode.ScaledReg = nullptr;
4936  }
4937  // Couldn't match.
4938  TPT.rollback(LastKnownGood);
4939  return false;
4940 }
4941 
4942 /// Check to see if all uses of OpVal by the specified inline asm call are due
4943 /// to memory operands. If so, return true, otherwise return false.
4944 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4945  const TargetLowering &TLI,
4946  const TargetRegisterInfo &TRI) {
4947  const Function *F = CI->getFunction();
4948  TargetLowering::AsmOperandInfoVector TargetConstraints =
4949  TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
4950 
4951  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
4952  // Compute the constraint code and ConstraintType to use.
4953  TLI.ComputeConstraintToUse(OpInfo, SDValue());
4954 
4955  // If this asm operand is our Value*, and if it isn't an indirect memory
4956  // operand, we can't fold it! TODO: Also handle C_Address?
4957  if (OpInfo.CallOperandVal == OpVal &&
4958  (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4959  !OpInfo.isIndirect))
4960  return false;
4961  }
4962 
4963  return true;
4964 }
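// Illustrative note (not part of the original source): an address value that
// is only used as an indirect "m" (memory) operand of an inline asm keeps the
// fold viable here, while the same value bound to a register constraint makes
// this helper return false, so FindAllMemoryUses treats the call as a
// non-foldable use.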
4965 
4966 // Max number of memory uses to look at before aborting the search to conserve
4967 // compile time.
4968 static constexpr int MaxMemoryUsesToScan = 20;
4969 
4970 /// Recursively walk all the uses of I until we find a memory use.
4971 /// If we find an obviously non-foldable instruction, return true.
4972 /// Add accessed addresses and types to MemoryUses.
4973 static bool FindAllMemoryUses(
4974  Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
4975  SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4976  const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
4977  BlockFrequencyInfo *BFI, int SeenInsts = 0) {
4978  // If we already considered this instruction, we're done.
4979  if (!ConsideredInsts.insert(I).second)
4980  return false;
4981 
4982  // If this is an obviously unfoldable instruction, bail out.
4983  if (!MightBeFoldableInst(I))
4984  return true;
4985 
4986  // Loop over all the uses, recursively processing them.
4987  for (Use &U : I->uses()) {
4988  // Conservatively return true if we're seeing a large number or a deep chain
4989  // of users. This avoids excessive compilation times in pathological cases.
4990  if (SeenInsts++ >= MaxMemoryUsesToScan)
4991  return true;
4992 
4993  Instruction *UserI = cast<Instruction>(U.getUser());
4994  if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4995  MemoryUses.push_back({U.get(), LI->getType()});
4996  continue;
4997  }
4998 
4999  if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5000  if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5001  return true; // Storing addr, not into addr.
5002  MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
5003  continue;
5004  }
5005 
5006  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5007  if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5008  return true; // Storing addr, not into addr.
5009  MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
5010  continue;
5011  }
5012 
5013  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5014  if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5015  return true; // Storing addr, not into addr.
5016  MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
5017  continue;
5018  }
5019 
5020  if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5021  if (CI->hasFnAttr(Attribute::Cold)) {
5022  // If this is a cold call, we can sink the addressing calculation into
5023  // the cold path. See optimizeCallInst.
5024  bool OptForSize =
5025  OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
5026  if (!OptForSize)
5027  continue;
5028  }
5029 
5030  InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5031  if (!IA)
5032  return true;
5033 
5034  // If this is a memory operand, we're cool, otherwise bail out.
5035  if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5036  return true;
5037  continue;
5038  }
5039 
5040  if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5041  PSI, BFI, SeenInsts))
5042  return true;
5043  }
5044 
5045  return false;
5046 }
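// Illustrative example (not part of the original source): for
//   store ptr %addr, ptr %slot
// %addr is the value being stored rather than the pointer operand, so the walk
// above reports it as a non-foldable use ("Storing addr, not into addr") and
// the caller conservatively refuses to fold the address computation.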
5047 
5048 /// Return true if Val is already known to be live at the use site that we're
5049 /// folding it into. If so, there is no cost to include it in the addressing
5050 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5051 /// instruction already.
5052 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5053  Value *KnownLive1,
5054  Value *KnownLive2) {
5055  // If Val is either of the known-live values, we know it is live!
5056  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5057  return true;
5058 
5059  // All values other than instructions and arguments (e.g. constants) are live.
5060  if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5061  return true;
5062 
5063  // If Val is a constant sized alloca in the entry block, it is live; this is
5064  // true because it is just a reference to the stack/frame pointer, which is
5065  // live for the whole function.
5066  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5067  if (AI->isStaticAlloca())
5068  return true;
5069 
5070  // Check to see if this value is already used in the memory instruction's
5071  // block. If so, it's already live into the block at the very least, so we
5072  // can reasonably fold it.
5073  return Val->isUsedInBasicBlock(MemoryInst->getParent());
5074 }
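// Illustrative consequence (not part of the original source): a constant-sized
// alloca in the entry block, e.g. "%buf = alloca [64 x i8]", is treated as
// always live, so folding it into an addressing mode is never counted as
// extending a live range.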
5075 
5076 /// It is possible for the addressing mode of the machine to fold the specified
5077 /// instruction into a load or store that ultimately uses it.
5078 /// However, the specified instruction has multiple uses.
5079 /// Given this, it may actually increase register pressure to fold it
5080 /// into the load. For example, consider this code:
5081 ///
5082 /// X = ...
5083 /// Y = X+1
5084 /// use(Y) -> nonload/store
5085 /// Z = Y+1
5086 /// load Z
5087 ///
5088 /// In this case, Y has multiple uses, and can be folded into the load of Z
5089 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
5090 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
5091 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
5092 /// number of computations either.
5093 ///
5094 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
5095 /// X was live across 'load Z' for other reasons, we actually *would* want to
5096 /// fold the addressing mode in the Z case. This would make Y die earlier.
5097 bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5098  Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5099  if (IgnoreProfitability)
5100  return true;
5101 
5102  // AMBefore is the addressing mode before this instruction was folded into it,
5103  // and AMAfter is the addressing mode after the instruction was folded. Get
5104  // the set of registers referenced by AMAfter and subtract out those
5105  // referenced by AMBefore: this is the set of values which folding in this
5106  // address extends the lifetime of.
5107  //
5108  // Note that there are only two potential values being referenced here,
5109  // BaseReg and ScaleReg (global addresses are always available, as are any
5110  // folded immediates).
5111  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5112 
5113  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5114  // lifetime wasn't extended by adding this instruction.
5115  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5116  BaseReg = nullptr;
5117  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5118  ScaledReg = nullptr;
5119 
5120  // If folding this instruction (and its subexprs) didn't extend any live
5121  // ranges, we're ok with it.
5122  if (!BaseReg && !ScaledReg)
5123  return true;
5124 
5125  // If all uses of this instruction can have the address mode sunk into them,
5126  // we can remove the addressing mode and effectively trade one live register
5127  // for another (at worst). In this context, folding an addressing mode into
5128  // the use is just a particularly nice way of sinking it.
5129  SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
5130  SmallPtrSet<Instruction *, 16> ConsideredInsts;
5131  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, PSI,
5132  BFI))
5133  return false; // Has a non-memory, non-foldable use!
5134 
5135  // Now that we know that all uses of this instruction are part of a chain of
5136  // computation involving only operations that could theoretically be folded
5137  // into a memory use, loop over each of these memory operation uses and see
5138  // if they could *actually* fold the instruction. The assumption is that
5139  // addressing modes are cheap and that duplicating the computation involved
5140  // many times is worthwhile, even on a fast path. For sinking candidates
5141  // (i.e. cold call sites), this serves as a way to prevent excessive code
5142  // growth since most architectures have some reasonably small and fast way to
5143  // compute an effective address (e.g. LEA on x86).
5144  SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5145  for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
5146  Value *Address = Pair.first;
5147  Type *AddressAccessTy = Pair.second;
5148  unsigned AS = Address->getType()->getPointerAddressSpace();
5149 
5150  // Do a match against the root of this address, ignoring profitability. This
5151  // will tell us if the addressing mode for the memory operation will
5152  // *actually* cover the shared instruction.
5153  ExtAddrMode Result;
5154  std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5155  0);
5156  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5157  TPT.getRestorationPoint();
5158  AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5159  AddressAccessTy, AS, MemoryInst, Result,
5160  InsertedInsts, PromotedInsts, TPT,
5161  LargeOffsetGEP, OptSize, PSI, BFI);
5162  Matcher.IgnoreProfitability = true;
5163  bool Success = Matcher.matchAddr(Address, 0);
5164  (void)Success;
5165  assert(Success && "Couldn't select *anything*?");
5166 
5167  // The match was only done to check the profitability; the changes made are
5168  // not part of the original matcher. Therefore, they should be dropped,
5169  // otherwise the original matcher will not present the right state.
5170  TPT.rollback(LastKnownGood);
5171 
5172  // If the match didn't cover I, then it won't be shared by it.
5173  if (!is_contained(MatchedAddrModeInsts, I))
5174  return false;
5175 
5176  MatchedAddrModeInsts.clear();
5177  }
5178 
5179  return true;
5180 }
5181 
5182 /// Return true if the specified values are defined in a
5183 /// different basic block than BB.
5184 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5185  if (Instruction *I = dyn_cast<Instruction>(V))
5186  return I->getParent() != BB;
5187  return false;
5188 }
5189 
5190 /// Sink addressing mode computation immediately before MemoryInst if doing so
5191 /// can be done without increasing register pressure. The need for the
5192 /// register pressure constraint means this can end up being an all or nothing
5193 /// decision for all uses of the same addressing computation.
5194 ///
5195 /// Load and Store Instructions often have addressing modes that can do
5196 /// significant amounts of computation. As such, instruction selection will try
5197 /// to get the load or store to do as much computation as possible for the
5198 /// program. The problem is that isel can only see within a single block. As
5199 /// such, we sink as much legal addressing mode work into the block as possible.
5200 ///
5201 /// This method is used to optimize both load/store and inline asms with memory
5202 /// operands. It's also used to sink addressing computations feeding into cold
5203 /// call sites into their (cold) basic block.
5204 ///
5205 /// The motivation for handling sinking into cold blocks is that doing so can
5206 /// both enable other address mode sinking (by satisfying the register pressure
5207 /// constraint above), and reduce register pressure globally (by removing the
5208 /// addressing mode computation from the fast path entirely).
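/// Illustrative example (not part of the original source): if
///   bb1: %addr = getelementptr i32, ptr %base, i64 %idx
///   bb2: %v = load i32, ptr %addr
/// then the address computation is re-materialized immediately before the load
/// so that instruction selection, which works one block at a time, can fold it
/// into the load's addressing mode.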
5209 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5210  Type *AccessTy, unsigned AddrSpace) {
5211  Value *Repl = Addr;
5212 
5213  // Try to collapse single-value PHI nodes. This is necessary to undo
5214  // unprofitable PRE transformations.
5215  SmallVector<Value *, 8> worklist;
5216  SmallPtrSet<Value *, 16> Visited;
5217  worklist.push_back(Addr);
5218 
5219  // Use a worklist to iteratively look through PHI and select nodes, and
5220  // ensure that the addressing modes obtained from the non-PHI/select roots of
5221  // the graph are compatible.
5222  bool PhiOrSelectSeen = false;
5223  SmallVector<Instruction *, 16> AddrModeInsts;
5224  const SimplifyQuery SQ(*DL, TLInfo);
5225  AddressingModeCombiner AddrModes(SQ, Addr);
5226  TypePromotionTransaction TPT(RemovedInsts);
5227  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5228  TPT.getRestorationPoint();
5229  while (!worklist.empty()) {
5230  Value *V = worklist.pop_back_val();
5231 
5232  // We allow traversing cyclic Phi nodes.
5233  // In case of success after this loop we ensure that traversing through
5234  // Phi nodes ends up with all cases computing an address of the form
5235  // BaseGV + Base + Scale * Index + Offset
5236  // where Scale and Offset are constants and BaseGV, Base and Index
5237  // are exactly the same Values in all cases.
5238  // It means that BaseGV, Scale and Offset dominate our memory instruction
5239  // and have the same value as they had in the address computation represented
5240  // as a Phi, so we can safely sink the address computation to the memory instruction.
5241  if (!Visited.insert(V).second)
5242  continue;
5243 
5244  // For a PHI node, push all of its incoming values.
5245  if (PHINode *P = dyn_cast<PHINode>(V)) {
5246  append_range(worklist, P->incoming_values());
5247  PhiOrSelectSeen = true;
5248  continue;
5249  }
5250  // Similar for select.
5251  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5252  worklist.push_back(SI->getFalseValue());
5253  worklist.push_back(SI->getTrueValue());
5254  PhiOrSelectSeen = true;
5255  continue;
5256  }
5257 
5258  // For non-PHIs, determine the addressing mode being computed. Note that
5259  // the result may differ depending on what other uses our candidate
5260  // addressing instructions might have.
5261  AddrModeInsts.clear();
5262  std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5263  0);
5264  // Defer the query (and possible computation) of the dom tree to the point of
5265  // actual use. It's expected that most address matches don't actually need
5266  // the domtree.
5267  auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5268  Function *F = MemoryInst->getParent()->getParent();
5269  return this->getDT(*F);
5270  };
5271  ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5272  V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5273  *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5274  BFI.get());
5275 
5276  GetElementPtrInst *GEP = LargeOffsetGEP.first;
5277  if (GEP && !NewGEPBases.count(GEP)) {
5278  // If splitting the underlying data structure can reduce the offset of a
5279  // GEP, collect the GEP. Skip the GEPs that are the new bases of
5280  // previously split data structures.
5281  LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5282  LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5283  }
5284 
5285  NewAddrMode.OriginalValue = V;
5286  if (!AddrModes.addNewAddrMode(NewAddrMode))
5287  break;
5288  }
5289 
5290  // Try to combine the AddrModes we've collected. If we couldn't collect any,
5291  // or we have multiple but either couldn't combine them or combining them
5292  // wouldn't do anything useful, bail out now.
5293  if (!AddrModes.combineAddrModes()) {
5294  TPT.rollback(LastKnownGood);
5295  return false;
5296  }
5297  bool Modified = TPT.commit();
5298 
5299  // Get the combined AddrMode (or the only AddrMode, if we only had one).
5300  ExtAddrMode AddrMode = AddrModes.getAddrMode();
5301 
5302  // If all the instructions matched are already in this BB, don't do anything.
5303  // If we saw a Phi node then it is definitely not local, and if we saw a
5304  // select then we want to push the address calculation past it even if it's
5305  // already in this BB.
5306  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5307  return IsNonLocalValue(V, MemoryInst->getParent());
5308  })) {
5309  LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
5310  << "\n");
5311  return Modified;
5312  }
5313 
5314  // Insert this computation right after this user. Since our caller is
5315  // scanning from the top of the BB to the bottom, reuses of the expr are
5316  // guaranteed to happen later.
5317  IRBuilder<> Builder(MemoryInst);
5318 
5319  // Now that we have determined the addressing expression we want to use and
5320  // know that we have to sink it into this block, check to see if we have
5321  // already done this for some other load/store instr in this block. If so,
5322  // reuse the computation. Before attempting reuse, check if the address is
5323  // still valid as it may have been erased.
5324 
5325  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5326 
5327  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5328  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5329  if (SunkAddr) {
5330  LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5331  << " for " << *MemoryInst << "\n");
5332  if (SunkAddr->getType() != Addr->getType()) {
5333  if (SunkAddr->getType()->getPointerAddressSpace() !=
5334  Addr->getType()->getPointerAddressSpace() &&
5335  !DL->isNonIntegralPointerType(Addr->getType())) {
5336  // There are two reasons the address spaces might not match: a no-op
5337  // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5338  // ptrtoint/inttoptr pair to ensure we match the original semantics.
5339  // TODO: allow bitcast between different address space pointers with the
5340  // same size.
5341  SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5342  SunkAddr =
5343  Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5344  } else
5345  SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5346  }
5347  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5348  SubtargetInfo->addrSinkUsingGEPs())) {
5349  // By default, we use the GEP-based method when AA is used later. This
5350  // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5351  LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5352  << " for " << *MemoryInst << "\n");
5353  Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5354 
5355  // First, find the pointer.
5356  if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5357  ResultPtr = AddrMode.BaseReg;
5358  AddrMode.BaseReg = nullptr;
5359  }
5360 
5361  if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5362  // We can't add more than one pointer together, nor can we scale a
5363  // pointer (both of which seem meaningless).
5364  if (ResultPtr || AddrMode.Scale != 1)
5365  return Modified;
5366 
5367  ResultPtr = AddrMode.ScaledReg;
5368  AddrMode.Scale = 0;
5369  }
5370 
5371  // It is only safe to sign extend the BaseReg if we know that the math
5372  // required to create it did not overflow before we extend it. Since
5373  // the original IR value was tossed in favor of a constant back when
5374  // the AddrMode was created we need to bail out gracefully if widths
5375  // do not match instead of extending it.
5376  //
5377  // (See below for code to add the scale.)
5378  if (AddrMode.Scale) {
5379  Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5380  if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5381  cast<IntegerType>(ScaledRegTy)->getBitWidth())
5382  return Modified;
5383  }
5384 
5385  if (AddrMode.BaseGV) {
5386  if (ResultPtr)
5387  return Modified;
5388 
5389  ResultPtr = AddrMode.BaseGV;
5390  }
5391 
5392  // If the real base value actually came from an inttoptr, then the matcher
5393  // will look through it and provide only the integer value. In that case,
5394  // use it here.
5395  if (!DL->isNonIntegralPointerType(Addr->getType())) {
5396  if (!ResultPtr && AddrMode.BaseReg) {
5397  ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5398  "sunkaddr");
5399  AddrMode.BaseReg = nullptr;
5400  } else if (!ResultPtr && AddrMode.Scale == 1) {
5401  ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5402  "sunkaddr");
5403  AddrMode.Scale = 0;
5404  }
5405  }
5406 
5407  if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
5408  !AddrMode.BaseOffs) {
5409  SunkAddr = Constant::getNullValue(Addr->getType());
5410  } else if (!ResultPtr) {
5411  return Modified;
5412  } else {
5413  Type *I8PtrTy =
5414  Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
5415  Type *I8Ty = Builder.getInt8Ty();
5416 
5417  // Start with the base register. Do this first so that subsequent address
5418  // matching finds it last, which will prevent it from trying to match it
5419  // as the scaled value in case it happens to be a m