1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/CodeGen/Analysis.h"
41 #include "llvm/Config/llvm-config.h"
42 #include "llvm/IR/Argument.h"
43 #include "llvm/IR/Attributes.h"
44 #include "llvm/IR/BasicBlock.h"
45 #include "llvm/IR/Constant.h"
46 #include "llvm/IR/Constants.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfo.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GlobalValue.h"
54 #include "llvm/IR/GlobalVariable.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InlineAsm.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instruction.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/IntrinsicsAArch64.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/MDBuilder.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Statepoint.h"
69 #include "llvm/IR/Type.h"
70 #include "llvm/IR/Use.h"
71 #include "llvm/IR/User.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/IR/ValueHandle.h"
74 #include "llvm/IR/ValueMap.h"
75 #include "llvm/InitializePasses.h"
76 #include "llvm/Pass.h"
79 #include "llvm/Support/Casting.h"
81 #include "llvm/Support/Compiler.h"
82 #include "llvm/Support/Debug.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <limits>
99 #include <memory>
100 #include <utility>
101 #include <vector>
102 
103 using namespace llvm;
104 using namespace llvm::PatternMatch;
105 
106 #define DEBUG_TYPE "codegenprepare"
107 
108 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
109 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
110 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
111 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
112  "sunken Cmps");
113 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
114  "of sunken Casts");
115 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
116  "computations were sunk");
117 STATISTIC(NumMemoryInstsPhiCreated,
118  "Number of phis created when address "
119  "computations were sunk to memory instructions");
120 STATISTIC(NumMemoryInstsSelectCreated,
121  "Number of select created when address "
122  "computations were sunk to memory instructions");
123 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
124 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
125 STATISTIC(NumAndsAdded,
126  "Number of and mask instructions added to form ext loads");
127 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
128 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
129 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
130 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
131 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
132 
134  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
135  cl::desc("Disable branch optimizations in CodeGenPrepare"));
136 
137 static cl::opt<bool>
138  DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
139  cl::desc("Disable GC optimizations in CodeGenPrepare"));
140 
142  "disable-cgp-select2branch", cl::Hidden, cl::init(false),
143  cl::desc("Disable select to branch conversion."));
144 
146  "addr-sink-using-gep", cl::Hidden, cl::init(true),
147  cl::desc("Address sinking in CGP using GEPs."));
148 
150  "enable-andcmp-sinking", cl::Hidden, cl::init(true),
151  cl::desc("Enable sinkinig and/cmp into branches."));
152 
154  "disable-cgp-store-extract", cl::Hidden, cl::init(false),
155  cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
156 
158  "stress-cgp-store-extract", cl::Hidden, cl::init(false),
159  cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
160 
162  "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
163  cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
164  "CodeGenPrepare"));
165 
167  "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
168  cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
169  "optimization in CodeGenPrepare"));
170 
172  "disable-preheader-prot", cl::Hidden, cl::init(false),
173  cl::desc("Disable protection against removing loop preheaders"));
174 
176  "profile-guided-section-prefix", cl::Hidden, cl::init(true),
177  cl::desc("Use profile info to add section prefix for hot/cold functions"));
178 
180  "profile-unknown-in-special-section", cl::Hidden,
181  cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
182  "profile, we cannot tell the function is cold for sure because "
183  "it may be a function newly added without ever being sampled. "
184  "With the flag enabled, compiler can put such profile unknown "
185  "functions into a special section, so runtime system can choose "
186  "to handle it in a different way than .text section, to save "
187  "RAM for example. "));
188 
190  "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
191  cl::desc("Use the basic-block-sections profile to determine the text "
192  "section prefix for hot functions. Functions with "
193  "basic-block-sections profile will be placed in `.text.hot` "
194  "regardless of their FDO profile info. Other functions won't be "
195  "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
196  "profiles."));
197 
199  "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
200  cl::desc("Skip merging empty blocks if (frequency of empty block) / "
201  "(frequency of destination block) is greater than this ratio"));
202 
204  "force-split-store", cl::Hidden, cl::init(false),
205  cl::desc("Force store splitting no matter what the target query says."));
206 
207 static cl::opt<bool>
208 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
209  cl::desc("Enable merging of redundant sexts when one is dominating"
210  " the other."), cl::init(true));
211 
213  "disable-complex-addr-modes", cl::Hidden, cl::init(false),
214  cl::desc("Disables combining addressing modes with different parts "
215  "in optimizeMemoryInst."));
216 
217 static cl::opt<bool>
218 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
219  cl::desc("Allow creation of Phis in Address sinking."));
220 
221 static cl::opt<bool>
222 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
223  cl::desc("Allow creation of selects in Address sinking."));
224 
226  "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
227  cl::desc("Allow combining of BaseReg field in Address sinking."));
228 
230  "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
231  cl::desc("Allow combining of BaseGV field in Address sinking."));
232 
234  "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
235  cl::desc("Allow combining of BaseOffs field in Address sinking."));
236 
238  "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
239  cl::desc("Allow combining of ScaledReg field in Address sinking."));
240 
241 static cl::opt<bool>
242  EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
243  cl::init(true),
244  cl::desc("Enable splitting large offset of GEP."));
245 
247  "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
248  cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
249 
250 static cl::opt<bool>
251  VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
252  cl::desc("Enable BFI update verification for "
253  "CodeGenPrepare."));
254 
256  "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
257  cl::desc("Enable converting phi types in CodeGenPrepare"));
258 
259 namespace {
260 
261 enum ExtType {
262  ZeroExtension, // Zero extension has been seen.
263  SignExtension, // Sign extension has been seen.
264  BothExtension // This extension type is used if we saw sext after
265  // ZeroExtension had been set, or if we saw zext after
266  // SignExtension had been set. It makes the type
267  // information of a promoted instruction invalid.
268 };
269 
270 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
271 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
272 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
273 using SExts = SmallVector<Instruction *, 16>;
274 using ValueToSExts = DenseMap<Value *, SExts>;
275 
276 class TypePromotionTransaction;
277 
278  class CodeGenPrepare : public FunctionPass {
279  const TargetMachine *TM = nullptr;
280  const TargetSubtargetInfo *SubtargetInfo;
281  const TargetLowering *TLI = nullptr;
282  const TargetRegisterInfo *TRI;
283  const TargetTransformInfo *TTI = nullptr;
284  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
285  const TargetLibraryInfo *TLInfo;
286  const LoopInfo *LI;
287  std::unique_ptr<BlockFrequencyInfo> BFI;
288  std::unique_ptr<BranchProbabilityInfo> BPI;
289  ProfileSummaryInfo *PSI;
290 
291  /// As we scan instructions optimizing them, this is the next instruction
292  /// to optimize. Transforms that can invalidate this should update it.
293  BasicBlock::iterator CurInstIterator;
294 
295  /// Keeps track of non-local addresses that have been sunk into a block.
296  /// This allows us to avoid inserting duplicate code for blocks with
297  /// multiple load/stores of the same address. The usage of WeakTrackingVH
298  /// enables SunkAddrs to be treated as a cache whose entries can be
299  /// invalidated if a sunken address computation has been erased.
300  DenseMap<Value *, WeakTrackingVH> SunkAddrs;
301 
302  /// Keeps track of all instructions inserted for the current function.
303  SetOfInstrs InsertedInsts;
304 
305  /// Keeps track of the original type of each promoted instruction for the
306  /// current function.
307  InstrToOrigTy PromotedInsts;
308 
309  /// Keep track of instructions removed during promotion.
310  SetOfInstrs RemovedInsts;
311 
312  /// Keep track of sext chains based on their initial value.
313  DenseMap<Value *, Instruction *> SeenChainsForSExt;
314 
315  /// Keep track of GEPs accessing the same data structures such as structs or
316  /// arrays that are candidates to be split later because of their large
317  /// size.
318  MapVector<
319      AssertingVH<Value>,
320      SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
321      LargeOffsetGEPMap;
322 
323  /// Keep track of new GEP base after splitting the GEPs having large offset.
324  SmallSet<AssertingVH<Value>, 2> NewGEPBases;
325 
326  /// Map large-offset GEPs to their serial numbers.
327  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
328 
329  /// Keep track of promoted SExts.
330  ValueToSExts ValToSExtendedUses;
331 
332  /// True if the function has the OptSize attribute.
333  bool OptSize;
334 
335  /// DataLayout for the Function being processed.
336  const DataLayout *DL = nullptr;
337 
338  /// Building the dominator tree can be expensive, so we only build it
339  /// lazily and update it when required.
340  std::unique_ptr<DominatorTree> DT;
341 
342  public:
343  static char ID; // Pass identification, replacement for typeid
344 
345  CodeGenPrepare() : FunctionPass(ID) {
346    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
347  }
348 
349  bool runOnFunction(Function &F) override;
350 
351  StringRef getPassName() const override { return "CodeGen Prepare"; }
352 
353  void getAnalysisUsage(AnalysisUsage &AU) const override {
354  // FIXME: When we can selectively preserve passes, preserve the domtree.
355    AU.addRequired<ProfileSummaryInfoWrapperPass>();
356    AU.addRequired<TargetLibraryInfoWrapperPass>();
357    AU.addRequired<TargetPassConfig>();
358    AU.addRequired<TargetTransformInfoWrapperPass>();
359    AU.addRequired<LoopInfoWrapperPass>();
360    AU.addUsedIfAvailable<BasicBlockSectionsProfileReader>();
361  }
362 
363  private:
364  template <typename F>
365  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
366  // Substituting can cause recursive simplifications, which can invalidate
367  // our iterator. Use a WeakTrackingVH to hold onto it in case this
368  // happens.
369  Value *CurValue = &*CurInstIterator;
370  WeakTrackingVH IterHandle(CurValue);
371 
372  f();
373 
374  // If the iterator instruction was recursively deleted, start over at the
375  // start of the block.
376  if (IterHandle != CurValue) {
377  CurInstIterator = BB->begin();
378  SunkAddrs.clear();
379  }
380  }
381 
382  // Get the DominatorTree, building if necessary.
383  DominatorTree &getDT(Function &F) {
384  if (!DT)
385  DT = std::make_unique<DominatorTree>(F);
386  return *DT;
387  }
388 
389  void removeAllAssertingVHReferences(Value *V);
390  bool eliminateAssumptions(Function &F);
391  bool eliminateFallThrough(Function &F);
392  bool eliminateMostlyEmptyBlocks(Function &F);
393  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
394  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
395  void eliminateMostlyEmptyBlock(BasicBlock *BB);
396  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
397  bool isPreheader);
398  bool makeBitReverse(Instruction &I);
399  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
400  bool optimizeInst(Instruction *I, bool &ModifiedDT);
401  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
402  Type *AccessTy, unsigned AddrSpace);
403  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
404  bool optimizeInlineAsmInst(CallInst *CS);
405  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
406  bool optimizeExt(Instruction *&I);
407  bool optimizeExtUses(Instruction *I);
408  bool optimizeLoadExt(LoadInst *Load);
409  bool optimizeShiftInst(BinaryOperator *BO);
410  bool optimizeFunnelShift(IntrinsicInst *Fsh);
411  bool optimizeSelectInst(SelectInst *SI);
412  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
413  bool optimizeSwitchType(SwitchInst *SI);
414  bool optimizeSwitchPhiConstants(SwitchInst *SI);
415  bool optimizeSwitchInst(SwitchInst *SI);
416  bool optimizeExtractElementInst(Instruction *Inst);
417  bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
418  bool fixupDbgValue(Instruction *I);
419  bool placeDbgValues(Function &F);
420  bool placePseudoProbes(Function &F);
421  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
422  LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
423  bool tryToPromoteExts(TypePromotionTransaction &TPT,
424  const SmallVectorImpl<Instruction *> &Exts,
425  SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
426  unsigned CreatedInstsCost = 0);
427  bool mergeSExts(Function &F);
428  bool splitLargeGEPOffsets();
429  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
430  SmallPtrSetImpl<Instruction *> &DeletedInstrs);
431  bool optimizePhiTypes(Function &F);
432  bool performAddressTypePromotion(
433  Instruction *&Inst,
434  bool AllowPromotionWithoutCommonHeader,
435  bool HasPromoted, TypePromotionTransaction &TPT,
436  SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
437  bool splitBranchCondition(Function &F, bool &ModifiedDT);
438  bool simplifyOffsetableRelocate(GCStatepointInst &I);
439 
440  bool tryToSinkFreeOperands(Instruction *I);
441  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
442  Value *Arg1, CmpInst *Cmp,
443  Intrinsic::ID IID);
444  bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
445  bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
446  bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
447  void verifyBFIUpdates(Function &F);
448  };
449 
450 } // end anonymous namespace
451 
452 char CodeGenPrepare::ID = 0;
453 
454 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
455  "Optimize for code generation", false, false)
463  "Optimize for code generation", false, false)
464 
465 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
466 
467 bool CodeGenPrepare::runOnFunction(Function &F) {
468  if (skipFunction(F))
469  return false;
470 
471  DL = &F.getParent()->getDataLayout();
472 
473  bool EverMadeChange = false;
474  // Clear per function information.
475  InsertedInsts.clear();
476  PromotedInsts.clear();
477 
478  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
479  SubtargetInfo = TM->getSubtargetImpl(F);
480  TLI = SubtargetInfo->getTargetLowering();
481  TRI = SubtargetInfo->getRegisterInfo();
482  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
483  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
484  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
485  BPI.reset(new BranchProbabilityInfo(F, *LI));
486  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
487  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
488  BBSectionsProfileReader =
489  getAnalysisIfAvailable<BasicBlockSectionsProfileReader>();
490  OptSize = F.hasOptSize();
491  // Use the basic-block-sections profile to promote hot functions to .text.hot if requested.
492  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
493  BBSectionsProfileReader->isFunctionHot(F.getName())) {
494  F.setSectionPrefix("hot");
495  } else if (ProfileGuidedSectionPrefix) {
496  // The hot attribute overrides profile-count-based hotness, while
497  // profile-count-based hotness overrides the cold attribute.
498  // This is conservative behavior.
499  if (F.hasFnAttribute(Attribute::Hot) ||
500  PSI->isFunctionHotInCallGraph(&F, *BFI))
501  F.setSectionPrefix("hot");
502  // If PSI shows this function is not hot, we place the function
503  // into the unlikely section if (1) PSI shows this is a cold function, or
504  // (2) the function has the cold attribute.
505  else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
506  F.hasFnAttribute(Attribute::Cold))
507  F.setSectionPrefix("unlikely");
508  else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
509           PSI->isFunctionHotnessUnknown(F))
510  F.setSectionPrefix("unknown");
511  }
512 
513  /// This optimization identifies DIV instructions that can be
514  /// profitably bypassed and carried out with a shorter, faster divide.
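  /// Illustrative sketch only (not the exact IR produced by bypassSlowDivision;
  /// the value names are made up): a 64-bit udiv whose operands happen to fit
  /// in 32 bits is guarded by a cheap runtime check and carried out with the
  /// faster 32-bit divide:
  ///   %both = or i64 %a, %b
  ///   %fits = icmp ult i64 %both, 4294967296   ; do both operands fit in 32 bits?
  ///   br i1 %fits, label %fast, label %slow
  ///   ; %fast truncates, performs a 32-bit udiv, and zero-extends the result;
  ///   ; %slow keeps the original 64-bit udiv.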
515  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
516  const DenseMap<unsigned int, unsigned int> &BypassWidths =
517  TLI->getBypassSlowDivWidths();
518  BasicBlock* BB = &*F.begin();
519  while (BB != nullptr) {
520  // bypassSlowDivision may create new BBs, but we don't want to reapply the
521  // optimization to those blocks.
522  BasicBlock* Next = BB->getNextNode();
523  // F.hasOptSize is already checked in the outer if statement.
524  if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
525  EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
526  BB = Next;
527  }
528  }
529 
530  // Get rid of @llvm.assume builtins before attempting to eliminate empty
531  // blocks, since there might be blocks that only contain @llvm.assume calls
532  // (plus arguments that we can get rid of).
533  EverMadeChange |= eliminateAssumptions(F);
534 
535  // Eliminate blocks that contain only PHI nodes and an
536  // unconditional branch.
537  EverMadeChange |= eliminateMostlyEmptyBlocks(F);
538 
539  bool ModifiedDT = false;
540  if (!DisableBranchOpts)
541  EverMadeChange |= splitBranchCondition(F, ModifiedDT);
542 
543  // Split some critical edges where one of the sources is an indirect branch,
544  // to help generate sane code for PHIs involving such edges.
545  EverMadeChange |=
546  SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
547 
548  bool MadeChange = true;
549  while (MadeChange) {
550  MadeChange = false;
551  DT.reset();
552  for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
553  bool ModifiedDTOnIteration = false;
554  MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration);
555 
556  // Restart BB iteration if the dominator tree of the Function was changed
557  if (ModifiedDTOnIteration)
558  break;
559  }
560  if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
561  MadeChange |= mergeSExts(F);
562  if (!LargeOffsetGEPMap.empty())
563  MadeChange |= splitLargeGEPOffsets();
564  MadeChange |= optimizePhiTypes(F);
565 
566  if (MadeChange)
567  eliminateFallThrough(F);
568 
569  // Really free removed instructions during promotion.
570  for (Instruction *I : RemovedInsts)
571  I->deleteValue();
572 
573  EverMadeChange |= MadeChange;
574  SeenChainsForSExt.clear();
575  ValToSExtendedUses.clear();
576  RemovedInsts.clear();
577  LargeOffsetGEPMap.clear();
578  LargeOffsetGEPID.clear();
579  }
580 
581  NewGEPBases.clear();
582  SunkAddrs.clear();
583 
584  if (!DisableBranchOpts) {
585  MadeChange = false;
586  // Use a set vector to get deterministic iteration order. The order the
587  // blocks are removed may affect whether or not PHI nodes in successors
588  // are removed.
589  SmallSetVector<BasicBlock *, 8> WorkList;
590  for (BasicBlock &BB : F) {
591  SmallVector<BasicBlock *, 2> Successors(successors(&BB));
592  MadeChange |= ConstantFoldTerminator(&BB, true);
593  if (!MadeChange) continue;
594 
595  for (BasicBlock *Succ : Successors)
596  if (pred_empty(Succ))
597  WorkList.insert(Succ);
598  }
599 
600  // Delete the dead blocks and any of their dead successors.
601  MadeChange |= !WorkList.empty();
602  while (!WorkList.empty()) {
603  BasicBlock *BB = WorkList.pop_back_val();
604  SmallVector<BasicBlock *, 2> Successors(successors(BB));
605 
606  DeleteDeadBlock(BB);
607 
608  for (BasicBlock *Succ : Successors)
609  if (pred_empty(Succ))
610  WorkList.insert(Succ);
611  }
612 
613  // Merge pairs of basic blocks with unconditional branches, connected by
614  // a single edge.
615  if (EverMadeChange || MadeChange)
616  MadeChange |= eliminateFallThrough(F);
617 
618  EverMadeChange |= MadeChange;
619  }
620 
621  if (!DisableGCOpts) {
622  SmallVector<GCStatepointInst *, 2> Statepoints;
623  for (BasicBlock &BB : F)
624  for (Instruction &I : BB)
625  if (auto *SP = dyn_cast<GCStatepointInst>(&I))
626  Statepoints.push_back(SP);
627  for (auto &I : Statepoints)
628  EverMadeChange |= simplifyOffsetableRelocate(*I);
629  }
630 
631  // Do this last to clean up use-before-def scenarios introduced by other
632  // preparatory transforms.
633  EverMadeChange |= placeDbgValues(F);
634  EverMadeChange |= placePseudoProbes(F);
635 
636 #ifndef NDEBUG
637  if (VerifyBFIUpdates)
638  verifyBFIUpdates(F);
639 #endif
640 
641  return EverMadeChange;
642 }
643 
644 bool CodeGenPrepare::eliminateAssumptions(Function &F) {
645  bool MadeChange = false;
646  for (BasicBlock &BB : F) {
647  CurInstIterator = BB.begin();
648  while (CurInstIterator != BB.end()) {
649  Instruction *I = &*(CurInstIterator++);
650  if (auto *Assume = dyn_cast<AssumeInst>(I)) {
651  MadeChange = true;
652  Value *Operand = Assume->getOperand(0);
653  Assume->eraseFromParent();
654 
655  resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
656  RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
657  });
658  }
659  }
660  }
661  return MadeChange;
662 }
663 
664 /// An instruction is about to be deleted, so remove all references to it in our
665 /// GEP-tracking data structures.
666 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
667  LargeOffsetGEPMap.erase(V);
668  NewGEPBases.erase(V);
669 
670  auto GEP = dyn_cast<GetElementPtrInst>(V);
671  if (!GEP)
672  return;
673 
674  LargeOffsetGEPID.erase(GEP);
675 
676  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
677  if (VecI == LargeOffsetGEPMap.end())
678  return;
679 
680  auto &GEPVector = VecI->second;
681  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
682 
683  if (GEPVector.empty())
684  LargeOffsetGEPMap.erase(VecI);
685 }
686 
687 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
688 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
689  DominatorTree NewDT(F);
690  LoopInfo NewLI(NewDT);
691  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
692  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
693  NewBFI.verifyMatch(*BFI);
694 }
695 
696 /// Merge basic blocks which are connected by a single edge, where one of the
697 /// basic blocks has a single successor pointing to the other basic block,
698 /// which has a single predecessor.
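/// Illustrative sketch only (hypothetical blocks): given
///   bb1: ... ; ends with "br label %bb2"
///   bb2: ... ; bb1 is its only predecessor
/// bb2 is folded into bb1 and the unconditional branch disappears.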
699 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
700  bool Changed = false;
701  // Scan all of the blocks in the function, except for the entry block.
702  // Use a temporary array to avoid iterator being invalidated when
703  // deleting blocks.
704  SmallVector<WeakTrackingVH, 16> Blocks;
705  for (auto &Block : llvm::drop_begin(F))
706  Blocks.push_back(&Block);
707 
708  SmallSet<WeakTrackingVH, 16> Preds;
709  for (auto &Block : Blocks) {
710  auto *BB = cast_or_null<BasicBlock>(Block);
711  if (!BB)
712  continue;
713  // If the destination block has a single pred, then this is a trivial
714  // edge, just collapse it.
715  BasicBlock *SinglePred = BB->getSinglePredecessor();
716 
717  // Don't merge if BB's address is taken.
718  if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
719 
720  BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
721  if (Term && !Term->isConditional()) {
722  Changed = true;
723  LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
724 
725  // Merge BB into SinglePred and delete it.
726  MergeBlockIntoPredecessor(BB);
727  Preds.insert(SinglePred);
728  }
729  }
730 
731  // (Repeatedly) merging blocks into their predecessors can create redundant
732  // debug intrinsics.
733  for (auto &Pred : Preds)
734  if (auto *BB = cast_or_null<BasicBlock>(Pred))
735  RemoveRedundantDbgInstrs(BB);
736 
737  return Changed;
738 }
739 
740 /// Find a destination block from BB if BB is mergeable empty block.
741 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
742  // If this block doesn't end with an uncond branch, ignore it.
743  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
744  if (!BI || !BI->isUnconditional())
745  return nullptr;
746 
747  // If the instruction before the branch (skipping debug info) isn't a phi
748  // node, then other stuff is happening here.
749  BasicBlock::iterator BBI = BI->getIterator();
750  if (BBI != BB->begin()) {
751  --BBI;
752  while (isa<DbgInfoIntrinsic>(BBI)) {
753  if (BBI == BB->begin())
754  break;
755  --BBI;
756  }
757  if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
758  return nullptr;
759  }
760 
761  // Do not break infinite loops.
762  BasicBlock *DestBB = BI->getSuccessor(0);
763  if (DestBB == BB)
764  return nullptr;
765 
766  if (!canMergeBlocks(BB, DestBB))
767  DestBB = nullptr;
768 
769  return DestBB;
770 }
771 
772 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
773 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
774 /// edges in ways that are non-optimal for isel. Start by eliminating these
775 /// blocks so we can split them the way we want them.
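/// Illustrative sketch only (hypothetical IR): a "mostly empty" block looks
/// like
///   bb:
///     %p = phi i32 [ %x, %a ], [ %y, %b ]
///     br label %dest
/// and, when safe and profitable, is merged into %dest.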
776 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
777  SmallPtrSet<BasicBlock *, 16> Preheaders;
778  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
779  while (!LoopList.empty()) {
780  Loop *L = LoopList.pop_back_val();
781  llvm::append_range(LoopList, *L);
782  if (BasicBlock *Preheader = L->getLoopPreheader())
783  Preheaders.insert(Preheader);
784  }
785 
786  bool MadeChange = false;
787  // Copy blocks into a temporary array to avoid iterator invalidation issues
788  // as we remove them.
789  // Note that this intentionally skips the entry block.
790  SmallVector<WeakTrackingVH, 16> Blocks;
791  for (auto &Block : llvm::drop_begin(F))
792  Blocks.push_back(&Block);
793 
794  for (auto &Block : Blocks) {
795  BasicBlock *BB = cast_or_null<BasicBlock>(Block);
796  if (!BB)
797  continue;
798  BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
799  if (!DestBB ||
800  !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
801  continue;
802 
803  eliminateMostlyEmptyBlock(BB);
804  MadeChange = true;
805  }
806  return MadeChange;
807 }
808 
809 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
810  BasicBlock *DestBB,
811  bool isPreheader) {
812  // Do not delete loop preheaders if doing so would create a critical edge.
813  // Loop preheaders can be good locations to spill registers. If the
814  // preheader is deleted and we create a critical edge, registers may be
815  // spilled in the loop body instead.
816  if (!DisablePreheaderProtect && isPreheader &&
817  !(BB->getSinglePredecessor() &&
818  BB->getSinglePredecessor()->getSingleSuccessor()))
819  return false;
820 
821  // Skip merging if the block's successor is also a successor to any callbr
822  // that leads to this block.
823  // FIXME: Is this really needed? Is this a correctness issue?
824  for (BasicBlock *Pred : predecessors(BB)) {
825  if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
826  for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
827  if (DestBB == CBI->getSuccessor(i))
828  return false;
829  }
830 
831  // Try to skip merging if the unique predecessor of BB is terminated by a
832  // switch or indirect branch instruction, and BB is used as an incoming block
833  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
834  // add COPY instructions in the predecessor of BB instead of BB (if it is not
836  // merged). Note that the critical edge created by merging such blocks won't be
836  // split in MachineSink because the jump table is not analyzable. By keeping
838  // such an empty block (BB), ISel will place COPY instructions in BB, not in the
838  // predecessor of BB.
839  BasicBlock *Pred = BB->getUniquePredecessor();
840  if (!Pred ||
841  !(isa<SwitchInst>(Pred->getTerminator()) ||
842  isa<IndirectBrInst>(Pred->getTerminator())))
843  return true;
844 
845  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
846  return true;
847 
848  // We use a simple cost heuristic which determines that skipping merging is
849  // profitable if the cost of skipping merging is less than the cost of
850  // merging : Cost(skipping merging) < Cost(merging BB), where the
851  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
852  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
853  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
854  // Freq(Pred) / Freq(BB) > 2.
855  // Note that if there are multiple empty blocks sharing the same incoming
856  // value for the PHIs in the DestBB, we consider them together. In such
857  // case, Cost(merging BB) will be the sum of their frequencies.
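  // Worked example (illustrative numbers only): with Freq(Pred) == 80,
  // Freq(BB) == 10 and the default ratio of 2, 80/10 = 8 > 2, so skipping the
  // merge is considered profitable and BB is kept.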
858 
859  if (!isa<PHINode>(DestBB->begin()))
860  return true;
861 
862  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
863 
864  // Find all other incoming blocks from which incoming values of all PHIs in
865  // DestBB are the same as the ones from BB.
866  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
867  if (DestBBPred == BB)
868  continue;
869 
870  if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
871  return DestPN.getIncomingValueForBlock(BB) ==
872  DestPN.getIncomingValueForBlock(DestBBPred);
873  }))
874  SameIncomingValueBBs.insert(DestBBPred);
875  }
876 
877  // See if all of BB's incoming values are the same as the value from Pred. In this
878  // case, no reason to skip merging because COPYs are expected to be placed in
879  // Pred already.
880  if (SameIncomingValueBBs.count(Pred))
881  return true;
882 
883  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
884  BlockFrequency BBFreq = BFI->getBlockFreq(BB);
885 
886  for (auto *SameValueBB : SameIncomingValueBBs)
887  if (SameValueBB->getUniquePredecessor() == Pred &&
888  DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
889  BBFreq += BFI->getBlockFreq(SameValueBB);
890 
891  return PredFreq.getFrequency() <=
892         BBFreq.getFrequency() * FreqRatioToSkipMerge;
893 }
894 
895 /// Return true if we can merge BB into DestBB if there is a single
896 /// unconditional branch between them, and BB contains no other non-phi
897 /// instructions.
898 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
899  const BasicBlock *DestBB) const {
900  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
901  // the successor. If there are more complex conditions (e.g. preheaders),
902  // don't mess around with them.
903  for (const PHINode &PN : BB->phis()) {
904  for (const User *U : PN.users()) {
905  const Instruction *UI = cast<Instruction>(U);
906  if (UI->getParent() != DestBB || !isa<PHINode>(UI))
907  return false;
908  // If User is inside DestBB block and it is a PHINode then check
909  // incoming value. If incoming value is not from BB then this is
910  // a complex condition (e.g. preheaders) we want to avoid here.
911  if (UI->getParent() == DestBB) {
912  if (const PHINode *UPN = dyn_cast<PHINode>(UI))
913  for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
914  Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
915  if (Insn && Insn->getParent() == BB &&
916  Insn->getParent() != UPN->getIncomingBlock(I))
917  return false;
918  }
919  }
920  }
921  }
922 
923  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
924  // and DestBB may have conflicting incoming values for the block. If so, we
925  // can't merge the block.
926  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
927  if (!DestBBPN) return true; // no conflict.
928 
929  // Collect the preds of BB.
930  SmallPtrSet<const BasicBlock *, 16> BBPreds;
931  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
932  // It is faster to get preds from a PHI than with pred_iterator.
933  for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
934  BBPreds.insert(BBPN->getIncomingBlock(i));
935  } else {
936  BBPreds.insert(pred_begin(BB), pred_end(BB));
937  }
938 
939  // Walk the preds of DestBB.
940  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
941  BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
942  if (BBPreds.count(Pred)) { // Common predecessor?
943  for (const PHINode &PN : DestBB->phis()) {
944  const Value *V1 = PN.getIncomingValueForBlock(Pred);
945  const Value *V2 = PN.getIncomingValueForBlock(BB);
946 
947  // If V2 is a phi node in BB, look up what the mapped value will be.
948  if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
949  if (V2PN->getParent() == BB)
950  V2 = V2PN->getIncomingValueForBlock(Pred);
951 
952  // If there is a conflict, bail out.
953  if (V1 != V2) return false;
954  }
955  }
956  }
957 
958  return true;
959 }
960 
961 /// Eliminate a basic block that has only phi's and an unconditional branch in
962 /// it.
963 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
964  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
965  BasicBlock *DestBB = BI->getSuccessor(0);
966 
967  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
968  << *BB << *DestBB);
969 
970  // If the destination block has a single pred, then this is a trivial edge,
971  // just collapse it.
972  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
973  if (SinglePred != DestBB) {
974  assert(SinglePred == BB &&
975  "Single predecessor not the same as predecessor");
976  // Merge DestBB into SinglePred/BB and delete it.
977  MergeBlockIntoPredecessor(DestBB);
978  // Note: BB(=SinglePred) will not be deleted on this path.
979  // DestBB(=its single successor) is the one that was deleted.
980  LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
981  return;
982  }
983  }
984 
985  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
986  // to handle the new incoming edges it is about to have.
987  for (PHINode &PN : DestBB->phis()) {
988  // Remove the incoming value for BB, and remember it.
989  Value *InVal = PN.removeIncomingValue(BB, false);
990 
991  // Two options: either the InVal is a phi node defined in BB or it is some
992  // value that dominates BB.
993  PHINode *InValPhi = dyn_cast<PHINode>(InVal);
994  if (InValPhi && InValPhi->getParent() == BB) {
995  // Add all of the input values of the input PHI as inputs of this phi.
996  for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
997  PN.addIncoming(InValPhi->getIncomingValue(i),
998  InValPhi->getIncomingBlock(i));
999  } else {
1000  // Otherwise, add one instance of the dominating value for each edge that
1001  // we will be adding.
1002  if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1003  for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1004  PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1005  } else {
1006  for (BasicBlock *Pred : predecessors(BB))
1007  PN.addIncoming(InVal, Pred);
1008  }
1009  }
1010  }
1011 
1012  // The PHIs are now updated, change everything that refers to BB to use
1013  // DestBB and remove BB.
1014  BB->replaceAllUsesWith(DestBB);
1015  BB->eraseFromParent();
1016  ++NumBlocksElim;
1017 
1018  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1019 }
1020 
1021 // Computes a map of base pointer relocation instructions to corresponding
1022 // derived pointer relocation instructions given a vector of all relocate calls
1023 static void computeBaseDerivedRelocateMap(
1024  const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1025  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1026  &RelocateInstMap) {
1027  // Collect information in two maps: one primarily for locating the base object
1028  // while filling the second map; the second map is the final structure holding
1029  // a mapping between Base and corresponding Derived relocate calls
1030  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1031  for (auto *ThisRelocate : AllRelocateCalls) {
1032  auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1033  ThisRelocate->getDerivedPtrIndex());
1034  RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1035  }
1036  for (auto &Item : RelocateIdxMap) {
1037  std::pair<unsigned, unsigned> Key = Item.first;
1038  if (Key.first == Key.second)
1039  // Base relocation: nothing to insert
1040  continue;
1041 
1042  GCRelocateInst *I = Item.second;
1043  auto BaseKey = std::make_pair(Key.first, Key.first);
1044 
1045  // We're iterating over RelocateIdxMap so we cannot modify it.
1046  auto MaybeBase = RelocateIdxMap.find(BaseKey);
1047  if (MaybeBase == RelocateIdxMap.end())
1048  // TODO: We might want to insert a new base object relocate and gep off
1049  // that, if there are enough derived object relocates.
1050  continue;
1051 
1052  RelocateInstMap[MaybeBase->second].push_back(I);
1053  }
1054 }
1055 
1056 // Accepts a GEP and extracts the operands into a vector provided they're all
1057 // small integer constants
1058 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1059  SmallVectorImpl<Value *> &OffsetV) {
1060  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1061  // Only accept small constant integer operands
1062  auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1063  if (!Op || Op->getZExtValue() > 20)
1064  return false;
1065  }
1066 
1067  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1068  OffsetV.push_back(GEP->getOperand(i));
1069  return true;
1070 }
1071 
1072 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1073 // replace, computes a replacement, and applies it.
1074 static bool
1075 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1076  const SmallVectorImpl<GCRelocateInst *> &Targets) {
1077  bool MadeChange = false;
1078  // We must ensure the relocation of derived pointer is defined after
1079  // relocation of base pointer. If we find a relocation corresponding to base
1080  // defined earlier than relocation of base then we move relocation of base
1081  // right before found relocation. We consider only relocation in the same
1082  // basic block as relocation of base. Relocations from other basic block will
1083  // be skipped by optimization and we do not care about them.
1084  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1085  &*R != RelocatedBase; ++R)
1086  if (auto *RI = dyn_cast<GCRelocateInst>(R))
1087  if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1088  if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1089  RelocatedBase->moveBefore(RI);
1090  break;
1091  }
1092 
1093  for (GCRelocateInst *ToReplace : Targets) {
1094  assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1095  "Not relocating a derived object of the original base object");
1096  if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1097  // A duplicate relocate call. TODO: coalesce duplicates.
1098  continue;
1099  }
1100 
1101  if (RelocatedBase->getParent() != ToReplace->getParent()) {
1102  // Base and derived relocates are in different basic blocks.
1103  // In this case transform is only valid when base dominates derived
1104  // relocate. However it would be too expensive to check dominance
1105  // for each such relocate, so we skip the whole transformation.
1106  continue;
1107  }
1108 
1109  Value *Base = ToReplace->getBasePtr();
1110  auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1111  if (!Derived || Derived->getPointerOperand() != Base)
1112  continue;
1113 
1114  SmallVector<Value *, 2> OffsetV;
1115  if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1116  continue;
1117 
1118  // Create a Builder and replace the target callsite with a gep
1119  assert(RelocatedBase->getNextNode() &&
1120  "Should always have one since it's not a terminator");
1121 
1122  // Insert after RelocatedBase
1123  IRBuilder<> Builder(RelocatedBase->getNextNode());
1124  Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1125 
1126  // If gc_relocate does not match the actual type, cast it to the right type.
1127  // In theory, there must be a bitcast after gc_relocate if the type does not
1128  // match, and we should reuse it to get the derived pointer. But there could be
1129  // cases like this:
1130  // bb1:
1131  // ...
1132  // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1133  // br label %merge
1134  //
1135  // bb2:
1136  // ...
1137  // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1138  // br label %merge
1139  //
1140  // merge:
1141  // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1142  // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1143  //
1144  // In this case, we cannot find the bitcast anymore. So we insert a new bitcast
1145  // whether or not one is already there. In this way, we can handle all cases, and
1146  // the extra bitcast should be optimized away in later passes.
1147  Value *ActualRelocatedBase = RelocatedBase;
1148  if (RelocatedBase->getType() != Base->getType()) {
1149  ActualRelocatedBase =
1150  Builder.CreateBitCast(RelocatedBase, Base->getType());
1151  }
1152  Value *Replacement = Builder.CreateGEP(
1153  Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1154  Replacement->takeName(ToReplace);
1155  // If the newly generated derived pointer's type does not match the original derived
1156  // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1157  Value *ActualReplacement = Replacement;
1158  if (Replacement->getType() != ToReplace->getType()) {
1159  ActualReplacement =
1160  Builder.CreateBitCast(Replacement, ToReplace->getType());
1161  }
1162  ToReplace->replaceAllUsesWith(ActualReplacement);
1163  ToReplace->eraseFromParent();
1164 
1165  MadeChange = true;
1166  }
1167  return MadeChange;
1168 }
1169 
1170 // Turns this:
1171 //
1172 // %base = ...
1173 // %ptr = gep %base + 15
1174 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1175 // %base' = relocate(%tok, i32 4, i32 4)
1176 // %ptr' = relocate(%tok, i32 4, i32 5)
1177 // %val = load %ptr'
1178 //
1179 // into this:
1180 //
1181 // %base = ...
1182 // %ptr = gep %base + 15
1183 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1184 // %base' = gc.relocate(%tok, i32 4, i32 4)
1185 // %ptr' = gep %base' + 15
1186 // %val = load %ptr'
1187 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1188  bool MadeChange = false;
1189  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1190  for (auto *U : I.users())
1191  if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1192  // Collect all the relocate calls associated with a statepoint
1193  AllRelocateCalls.push_back(Relocate);
1194 
1195  // We need at least one base pointer relocation + one derived pointer
1196  // relocation to mangle
1197  if (AllRelocateCalls.size() < 2)
1198  return false;
1199 
1200  // RelocateInstMap is a mapping from the base relocate instruction to the
1201  // corresponding derived relocate instructions
1202  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1203  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1204  if (RelocateInstMap.empty())
1205  return false;
1206 
1207  for (auto &Item : RelocateInstMap)
1208  // Item.first is the RelocatedBase to offset against
1209  // Item.second is the vector of Targets to replace
1210  MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1211  return MadeChange;
1212 }
1213 
1214 /// Sink the specified cast instruction into its user blocks.
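/// Illustrative sketch only (hypothetical IR): if
///   DefBB:  %t = trunc i64 %x to i32
///   UseBB:  %u = add i32 %t, 1
/// a copy of the trunc is created at the start of UseBB and the use is
/// rewritten to it, so the casted value need not be live across the edge.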
1215 static bool SinkCast(CastInst *CI) {
1216  BasicBlock *DefBB = CI->getParent();
1217 
1218  /// InsertedCasts - Only insert a cast in each block once.
1219  DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1220 
1221  bool MadeChange = false;
1222  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1223  UI != E; ) {
1224  Use &TheUse = UI.getUse();
1225  Instruction *User = cast<Instruction>(*UI);
1226 
1227  // Figure out which BB this cast is used in. For PHI's this is the
1228  // appropriate predecessor block.
1229  BasicBlock *UserBB = User->getParent();
1230  if (PHINode *PN = dyn_cast<PHINode>(User)) {
1231  UserBB = PN->getIncomingBlock(TheUse);
1232  }
1233 
1234  // Preincrement use iterator so we don't invalidate it.
1235  ++UI;
1236 
1237  // The first insertion point of a block containing an EH pad is after the
1238  // pad. If the pad is the user, we cannot sink the cast past the pad.
1239  if (User->isEHPad())
1240  continue;
1241 
1242  // If the block selected to receive the cast is an EH pad that does not
1243  // allow non-PHI instructions before the terminator, we can't sink the
1244  // cast.
1245  if (UserBB->getTerminator()->isEHPad())
1246  continue;
1247 
1248  // If this user is in the same block as the cast, don't change the cast.
1249  if (UserBB == DefBB) continue;
1250 
1251  // If we have already inserted a cast into this block, use it.
1252  CastInst *&InsertedCast = InsertedCasts[UserBB];
1253 
1254  if (!InsertedCast) {
1255  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1256  assert(InsertPt != UserBB->end());
1257  InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1258  CI->getType(), "", &*InsertPt);
1259  InsertedCast->setDebugLoc(CI->getDebugLoc());
1260  }
1261 
1262  // Replace a use of the cast with a use of the new cast.
1263  TheUse = InsertedCast;
1264  MadeChange = true;
1265  ++NumCastUses;
1266  }
1267 
1268  // If we removed all uses, nuke the cast.
1269  if (CI->use_empty()) {
1270  salvageDebugInfo(*CI);
1271  CI->eraseFromParent();
1272  MadeChange = true;
1273  }
1274 
1275  return MadeChange;
1276 }
1277 
1278 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1279 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1280 /// reduce the number of virtual registers that must be created and coalesced.
1281 ///
1282 /// Return true if any changes are made.
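/// Illustrative sketch only (target-dependent): on a target where i8 and i16
/// are promoted to i32, "%t = trunc i32 %x to i8" becomes a no-op copy after
/// type legalization, so it is sunk just like the casts handled by SinkCast.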
1283 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1284  const DataLayout &DL) {
1285  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1286  // than sinking only nop casts, but is helpful on some platforms.
1287  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1288  if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1289  ASC->getDestAddressSpace()))
1290  return false;
1291  }
1292 
1293  // If this is a noop copy,
1294  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1295  EVT DstVT = TLI.getValueType(DL, CI->getType());
1296 
1297  // This is an fp<->int conversion?
1298  if (SrcVT.isInteger() != DstVT.isInteger())
1299  return false;
1300 
1301  // If this is an extension, it will be a zero or sign extension, which
1302  // isn't a noop.
1303  if (SrcVT.bitsLT(DstVT)) return false;
1304 
1305  // If these values will be promoted, find out what they will be promoted
1306  // to. This helps us consider truncates on PPC as noop copies when they
1307  // are.
1308  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1309      TargetLowering::TypePromoteInteger)
1310  SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1311  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1312      TargetLowering::TypePromoteInteger)
1313  DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1314 
1315  // If, after promotion, these are the same types, this is a noop copy.
1316  if (SrcVT != DstVT)
1317  return false;
1318 
1319  return SinkCast(CI);
1320 }
1321 
1322 // Match a simple increment by constant operation. Note that if a sub is
1323 // matched, the step is negated (as if the step had been canonicalized to
1324 // an add, even though we leave the instruction alone.)
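// Illustrative sketch only (hypothetical values): "%iv.next = add i32 %iv, 4"
// binds LHS = %iv and Step = 4, while "%iv.next = sub i32 %iv, 4" also
// matches but reports Step = -4.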
1325 static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1326  Constant *&Step) {
1327  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1328  match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1329  m_Instruction(LHS), m_Constant(Step)))))
1330  return true;
1331  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1332  match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1333  m_Instruction(LHS), m_Constant(Step))))) {
1334  Step = ConstantExpr::getNeg(Step);
1335  return true;
1336  }
1337  return false;
1338 }
1339 
1340 /// If the given \p PN is an induction variable with value IVInc coming from the
1341 /// backedge, and on each iteration it gets increased by Step, return pair
1342 /// <IVInc, Step>. Otherwise, return None.
1343 static Optional<std::pair<Instruction *, Constant *>>
1344 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1345  const Loop *L = LI->getLoopFor(PN->getParent());
1346  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1347  return None;
1348  auto *IVInc =
1349  dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1350  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1351  return None;
1352  Instruction *LHS = nullptr;
1353  Constant *Step = nullptr;
1354  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1355  return std::make_pair(IVInc, Step);
1356  return None;
1357 }
1358 
1359 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1360  auto *I = dyn_cast<Instruction>(V);
1361  if (!I)
1362  return false;
1363  Instruction *LHS = nullptr;
1364  Constant *Step = nullptr;
1365  if (!matchIncrement(I, LHS, Step))
1366  return false;
1367  if (auto *PN = dyn_cast<PHINode>(LHS))
1368  if (auto IVInc = getIVIncrement(PN, LI))
1369  return IVInc->first == I;
1370  return false;
1371 }
1372 
1373 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1374  Value *Arg0, Value *Arg1,
1375  CmpInst *Cmp,
1376  Intrinsic::ID IID) {
1377  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1378  if (!isIVIncrement(BO, LI))
1379  return false;
1380  const Loop *L = LI->getLoopFor(BO->getParent());
1381  assert(L && "L should not be null after isIVIncrement()");
1382  // Do not risk moving the increment into a child loop.
1383  if (LI->getLoopFor(Cmp->getParent()) != L)
1384  return false;
1385 
1386  // Finally, we need to ensure that the insert point will dominate all
1387  // existing uses of the increment.
1388 
1389  auto &DT = getDT(*BO->getParent()->getParent());
1390  if (DT.dominates(Cmp->getParent(), BO->getParent()))
1391  // If we're moving up the dom tree, all uses are trivially dominated.
1392  // (This is the common case for code produced by LSR.)
1393  return true;
1394 
1395  // Otherwise, special case the single use in the phi recurrence.
1396  return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1397  };
1398  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1399  // We used to use a dominator tree here to allow multi-block optimization.
1400  // But that was problematic because:
1401  // 1. It could cause a perf regression by hoisting the math op into the
1402  // critical path.
1403  // 2. It could cause a perf regression by creating a value that was live
1404  // across multiple blocks and increasing register pressure.
1405  // 3. Use of a dominator tree could cause large compile-time regression.
1406  // This is because we recompute the DT on every change in the main CGP
1407  // run-loop. The recomputing is probably unnecessary in many cases, so if
1408  // that was fixed, using a DT here would be ok.
1409  //
1410  // There is one important particular case we still want to handle: if BO is
1411  // the IV increment. Important properties that make it profitable:
1412  // - We can speculate IV increment anywhere in the loop (as long as the
1413  // indvar Phi is its only user);
1414  // - Upon computing Cmp, we effectively compute something equivalent to the
1415  // IV increment (even though it looks different in the IR). So moving it up
1416  // to the cmp point does not really increase register pressure.
1417  return false;
1418  }
1419 
1420  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1421  if (BO->getOpcode() == Instruction::Add &&
1422  IID == Intrinsic::usub_with_overflow) {
1423  assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1424  Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1425  }
1426 
1427  // Insert at the first instruction of the pair.
1428  Instruction *InsertPt = nullptr;
1429  for (Instruction &Iter : *Cmp->getParent()) {
1430  // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1431  // the overflow intrinsic are defined.
1432  if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1433  InsertPt = &Iter;
1434  break;
1435  }
1436  }
1437  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1438 
1439  IRBuilder<> Builder(InsertPt);
1440  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1441  if (BO->getOpcode() != Instruction::Xor) {
1442  Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1443  BO->replaceAllUsesWith(Math);
1444  } else
1445  assert(BO->hasOneUse() &&
1446  "Patterns with XOr should use the BO only in the compare");
1447  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1448  Cmp->replaceAllUsesWith(OV);
1449  Cmp->eraseFromParent();
1450  BO->eraseFromParent();
1451  return true;
1452 }
1453 
1454 /// Match special-case patterns that check for unsigned add overflow.
1455 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1456  BinaryOperator *&Add) {
1457  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1458  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1459  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1460 
1461  // We are not expecting non-canonical/degenerate code. Just bail out.
1462  if (isa<Constant>(A))
1463  return false;
1464 
1465  ICmpInst::Predicate Pred = Cmp->getPredicate();
1466  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1467  B = ConstantInt::get(B->getType(), 1);
1468  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1469  B = ConstantInt::get(B->getType(), -1);
1470  else
1471  return false;
1472 
1473  // Check the users of the variable operand of the compare looking for an add
1474  // with the adjusted constant.
1475  for (User *U : A->users()) {
1476  if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1477  Add = cast<BinaryOperator>(U);
1478  return true;
1479  }
1480  }
1481  return false;
1482 }
1483 
1484 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1485 /// intrinsic. Return true if any changes were made.
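/// Illustrative sketch only (not exact output; the value names are made up):
///   %add = add i32 %a, %b
///   %ov  = icmp ult i32 %add, %a        ; unsigned-overflow check
/// is rewritten to
///   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %res, 0
///   %ov  = extractvalue { i32, i1 } %res, 1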
1486 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1487  bool &ModifiedDT) {
1488  Value *A, *B;
1489  BinaryOperator *Add;
1490  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1491  if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1492  return false;
1493  // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1494  A = Add->getOperand(0);
1495  B = Add->getOperand(1);
1496  }
1497 
1498  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1499  TLI->getValueType(*DL, Add->getType()),
1500  Add->hasNUsesOrMore(2)))
1501  return false;
1502 
1503  // We don't want to move around uses of condition values this late, so we
1504  // check if it is legal to create the call to the intrinsic in the basic
1505  // block containing the icmp.
1506  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1507  return false;
1508 
1509  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1510  Intrinsic::uadd_with_overflow))
1511  return false;
1512 
1513  // Reset callers - do not crash by iterating over a dead instruction.
1514  ModifiedDT = true;
1515  return true;
1516 }
1517 
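/// Try to combine a compare that checks for unsigned subtraction overflow with
/// the matching subtract into a call to llvm.usub.with.overflow. A minimal
/// illustrative sketch (value names are hypothetical):
///   %sub = sub i32 %a, %b
///   %ov  = icmp ult i32 %a, %b          ; borrow check
/// =>
///   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
///   %sub = extractvalue { i32, i1 } %m, 0
///   %ov  = extractvalue { i32, i1 } %m, 1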
1518 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1519  bool &ModifiedDT) {
1520  // We are not expecting non-canonical/degenerate code. Just bail out.
1521  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1522  if (isa<Constant>(A) && isa<Constant>(B))
1523  return false;
1524 
1525  // Convert (A u> B) to (A u< B) to simplify pattern matching.
1526  ICmpInst::Predicate Pred = Cmp->getPredicate();
1527  if (Pred == ICmpInst::ICMP_UGT) {
1528  std::swap(A, B);
1529  Pred = ICmpInst::ICMP_ULT;
1530  }
1531  // Convert special-case: (A == 0) is the same as (A u< 1).
1532  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1533  B = ConstantInt::get(B->getType(), 1);
1534  Pred = ICmpInst::ICMP_ULT;
1535  }
1536  // Convert special-case: (A != 0) is the same as (0 u< A).
1537  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1538  std::swap(A, B);
1539  Pred = ICmpInst::ICMP_ULT;
1540  }
1541  if (Pred != ICmpInst::ICMP_ULT)
1542  return false;
1543 
1544  // Walk the users of a variable operand of a compare looking for a subtract or
1545  // add with that same operand. Also match the 2nd operand of the compare to
1546  // the add/sub, but that may be a negated constant operand of an add.
1547  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1548  BinaryOperator *Sub = nullptr;
1549  for (User *U : CmpVariableOperand->users()) {
1550  // A - B, A u< B --> usubo(A, B)
1551  if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1552  Sub = cast<BinaryOperator>(U);
1553  break;
1554  }
1555 
1556  // A + (-C), A u< C (canonicalized form of (sub A, C))
1557  const APInt *CmpC, *AddC;
1558  if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1559  match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1560  Sub = cast<BinaryOperator>(U);
1561  break;
1562  }
1563  }
1564  if (!Sub)
1565  return false;
1566 
1567  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1568  TLI->getValueType(*DL, Sub->getType()),
1569  Sub->hasNUsesOrMore(2)))
1570  return false;
1571 
1572  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1573  Cmp, Intrinsic::usub_with_overflow))
1574  return false;
1575 
1576  // Reset callers - do not crash by iterating over a dead instruction.
1577  ModifiedDT = true;
1578  return true;
1579 }
1580 
1581 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1582 /// registers that must be created and coalesced. This is a clear win except on
1583 /// targets with multiple condition code registers (PowerPC), where it might
1584 /// lose; some adjustment may be wanted there.
1585 ///
1586 /// Return true if any changes are made.
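///
/// An illustrative sketch (block and value names are hypothetical):
///   DefBB:
///     %c = icmp eq i32 %x, %y
///     ...
///   UseBB:
///     br i1 %c, label %T, label %F
/// =>
///   UseBB:
///     %c.sunk = icmp eq i32 %x, %y
///     br i1 %c.sunk, label %T, label %F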
1587 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1588  if (TLI.hasMultipleConditionRegisters())
1589  return false;
1590 
1591  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1592  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1593  return false;
1594 
1595  // Only insert a cmp in each block once.
1596  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1597 
1598  bool MadeChange = false;
1599  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1600  UI != E; ) {
1601  Use &TheUse = UI.getUse();
1602  Instruction *User = cast<Instruction>(*UI);
1603 
1604  // Preincrement use iterator so we don't invalidate it.
1605  ++UI;
1606 
1607  // Don't bother for PHI nodes.
1608  if (isa<PHINode>(User))
1609  continue;
1610 
1611  // Figure out which BB this cmp is used in.
1612  BasicBlock *UserBB = User->getParent();
1613  BasicBlock *DefBB = Cmp->getParent();
1614 
1615  // If this user is in the same block as the cmp, don't change the cmp.
1616  if (UserBB == DefBB) continue;
1617 
1618  // If we have already inserted a cmp into this block, use it.
1619  CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1620 
1621  if (!InsertedCmp) {
1622  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1623  assert(InsertPt != UserBB->end());
1624  InsertedCmp =
1625  CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1626  Cmp->getOperand(0), Cmp->getOperand(1), "",
1627  &*InsertPt);
1628  // Propagate the debug info.
1629  InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1630  }
1631 
1632  // Replace a use of the cmp with a use of the new cmp.
1633  TheUse = InsertedCmp;
1634  MadeChange = true;
1635  ++NumCmpUses;
1636  }
1637 
1638  // If we removed all uses, nuke the cmp.
1639  if (Cmp->use_empty()) {
1640  Cmp->eraseFromParent();
1641  MadeChange = true;
1642  }
1643 
1644  return MadeChange;
1645 }
1646 
1647 /// For pattern like:
1648 ///
1649 /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1650 /// ...
1651 /// DomBB:
1652 /// ...
1653 /// br DomCond, TrueBB, CmpBB
1654 /// CmpBB: (with DomBB being the single predecessor)
1655 /// ...
1656 /// Cmp = icmp eq CmpOp0, CmpOp1
1657 /// ...
1658 ///
1659 /// This would use two comparisons on targets where the lowering of icmp
1660 /// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries
1661 /// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'.
1662 /// After that, DomCond and Cmp can use the same comparison, eliminating one
1663 /// comparison.
1664 ///
1665 /// Return true if any changes are made.
1666 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1667  const TargetLowering &TLI) {
1668  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1669  return false;
1670 
1671  ICmpInst::Predicate Pred = Cmp->getPredicate();
1672  if (Pred != ICmpInst::ICMP_EQ)
1673  return false;
1674 
1675  // If icmp eq has users other than BranchInst and SelectInst, converting it to
1676  // icmp slt/sgt would introduce more redundant LLVM IR.
1677  for (User *U : Cmp->users()) {
1678  if (isa<BranchInst>(U))
1679  continue;
1680  if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1681  continue;
1682  return false;
1683  }
1684 
1685  // This is a cheap/incomplete check for dominance - just match a single
1686  // predecessor with a conditional branch.
1687  BasicBlock *CmpBB = Cmp->getParent();
1688  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1689  if (!DomBB)
1690  return false;
1691 
1692  // We want to ensure that the only way control gets to the comparison of
1693  // interest is that a less/greater than comparison on the same operands is
1694  // false.
1695  Value *DomCond;
1696  BasicBlock *TrueBB, *FalseBB;
1697  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1698  return false;
1699  if (CmpBB != FalseBB)
1700  return false;
1701 
1702  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1703  ICmpInst::Predicate DomPred;
1704  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1705  return false;
1706  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1707  return false;
1708 
1709  // Convert the equality comparison to the opposite of the dominating
1710  // comparison and swap the direction for all branch/select users.
1711  // We have conceptually converted:
1712  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1713  // to
1714  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
1715  // And similarly for branches.
1716  for (User *U : Cmp->users()) {
1717  if (auto *BI = dyn_cast<BranchInst>(U)) {
1718  assert(BI->isConditional() && "Must be conditional");
1719  BI->swapSuccessors();
1720  continue;
1721  }
1722  if (auto *SI = dyn_cast<SelectInst>(U)) {
1723  // Swap operands
1724  SI->swapValues();
1725  SI->swapProfMetadata();
1726  continue;
1727  }
1728  llvm_unreachable("Must be a branch or a select");
1729  }
1730  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1731  return true;
1732 }
1733 
1734 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) {
1735  if (sinkCmpExpression(Cmp, *TLI))
1736  return true;
1737 
1738  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1739  return true;
1740 
1741  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1742  return true;
1743 
1744  if (foldICmpWithDominatingICmp(Cmp, *TLI))
1745  return true;
1746 
1747  return false;
1748 }
1749 
1750 /// Duplicate and sink the given 'and' instruction into user blocks where it is
1751 /// used in a compare to allow isel to generate better code for targets where
1752 /// this operation can be combined.
1753 ///
1754 /// Return true if any changes are made.
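///
/// An illustrative sketch (names are hypothetical): an 'and' feeding only
/// (icmp eq ..., 0) users in other blocks is duplicated next to each compare:
///   BB0:
///     %m = and i64 %x, 255
///     ...
///   BB1:
///     %z = icmp eq i64 %m, 0
/// =>
///   BB1:
///     %m.sunk = and i64 %x, 255
///     %z = icmp eq i64 %m.sunk, 0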
1755 static bool sinkAndCmp0Expression(Instruction *AndI,
1756  const TargetLowering &TLI,
1757  SetOfInstrs &InsertedInsts) {
1758  // Double-check that we're not trying to optimize an instruction that was
1759  // already optimized by some other part of this pass.
1760  assert(!InsertedInsts.count(AndI) &&
1761  "Attempting to optimize already optimized and instruction");
1762  (void) InsertedInsts;
1763 
1764  // Nothing to do for single use in same basic block.
1765  if (AndI->hasOneUse() &&
1766  AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1767  return false;
1768 
1769  // Try to avoid cases where sinking/duplicating is likely to increase register
1770  // pressure.
1771  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1772  !isa<ConstantInt>(AndI->getOperand(1)) &&
1773  AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1774  return false;
1775 
1776  for (auto *U : AndI->users()) {
1777  Instruction *User = cast<Instruction>(U);
1778 
1779  // Only sink 'and' feeding icmp with 0.
1780  if (!isa<ICmpInst>(User))
1781  return false;
1782 
1783  auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1784  if (!CmpC || !CmpC->isZero())
1785  return false;
1786  }
1787 
1788  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1789  return false;
1790 
1791  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1792  LLVM_DEBUG(AndI->getParent()->dump());
1793 
1794  // Push the 'and' into the same block as the icmp 0. There should only be
1795  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1796  // others, so we don't need to keep track of which BBs we insert into.
1797  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1798  UI != E; ) {
1799  Use &TheUse = UI.getUse();
1800  Instruction *User = cast<Instruction>(*UI);
1801 
1802  // Preincrement use iterator so we don't invalidate it.
1803  ++UI;
1804 
1805  LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1806 
1807  // Keep the 'and' in the same place if the use is already in the same block.
1808  Instruction *InsertPt =
1809  User->getParent() == AndI->getParent() ? AndI : User;
1810  Instruction *InsertedAnd =
1811  BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1812  AndI->getOperand(1), "", InsertPt);
1813  // Propagate the debug info.
1814  InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1815 
1816  // Replace a use of the 'and' with a use of the new 'and'.
1817  TheUse = InsertedAnd;
1818  ++NumAndUses;
1819  LLVM_DEBUG(User->getParent()->dump());
1820  }
1821 
1822  // We removed all uses, nuke the and.
1823  AndI->eraseFromParent();
1824  return true;
1825 }
1826 
1827 /// Check if the candidates could be combined with a shift instruction, which
1828 /// includes:
1829 /// 1. Truncate instruction
1830 /// 2. And instruction whose immediate is a mask of the low bits:
1831 /// imm & (imm+1) == 0
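/// For example, 0x0f and 0xff satisfy this (0x0f & 0x10 == 0), while 0xf0 does
/// not (0xf0 & 0xf1 == 0xf0), so only low-bit masks qualify.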
1832 static bool isExtractBitsCandidateUse(Instruction *User) {
1833  if (!isa<TruncInst>(User)) {
1834  if (User->getOpcode() != Instruction::And ||
1835  !isa<ConstantInt>(User->getOperand(1)))
1836  return false;
1837 
1838  const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1839 
1840  if ((Cimm & (Cimm + 1)).getBoolValue())
1841  return false;
1842  }
1843  return true;
1844 }
1845 
1846 /// Sink both shift and truncate instruction to the use of truncate's BB.
1847 static bool
1848 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1849  DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1850  const TargetLowering &TLI, const DataLayout &DL) {
1851  BasicBlock *UserBB = User->getParent();
1852  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1853  auto *TruncI = cast<TruncInst>(User);
1854  bool MadeChange = false;
1855 
1856  for (Value::user_iterator TruncUI = TruncI->user_begin(),
1857  TruncE = TruncI->user_end();
1858  TruncUI != TruncE;) {
1859 
1860  Use &TruncTheUse = TruncUI.getUse();
1861  Instruction *TruncUser = cast<Instruction>(*TruncUI);
1862  // Preincrement use iterator so we don't invalidate it.
1863 
1864  ++TruncUI;
1865 
1866  int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1867  if (!ISDOpcode)
1868  continue;
1869 
1870  // If the use is actually a legal node, there will not be an
1871  // implicit truncate.
1872  // FIXME: always querying the result type is just an
1873  // approximation; some nodes' legality is determined by the
1874  // operand or other means. There's no good way to find out though.
1875  if (TLI.isOperationLegalOrCustom(
1876  ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1877  continue;
1878 
1879  // Don't bother for PHI nodes.
1880  if (isa<PHINode>(TruncUser))
1881  continue;
1882 
1883  BasicBlock *TruncUserBB = TruncUser->getParent();
1884 
1885  if (UserBB == TruncUserBB)
1886  continue;
1887 
1888  BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1889  CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1890 
1891  if (!InsertedShift && !InsertedTrunc) {
1892  BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1893  assert(InsertPt != TruncUserBB->end());
1894  // Sink the shift
1895  if (ShiftI->getOpcode() == Instruction::AShr)
1896  InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1897  "", &*InsertPt);
1898  else
1899  InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1900  "", &*InsertPt);
1901  InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1902 
1903  // Sink the trunc
1904  BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1905  TruncInsertPt++;
1906  assert(TruncInsertPt != TruncUserBB->end());
1907 
1908  InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1909  TruncI->getType(), "", &*TruncInsertPt);
1910  InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
1911 
1912  MadeChange = true;
1913 
1914  TruncTheUse = InsertedTrunc;
1915  }
1916  }
1917  return MadeChange;
1918 }
1919 
1920 /// Sink the shift *right* instruction into user blocks if the uses could
1921 /// potentially be combined with this shift instruction to generate a BitExtract
1922 /// instruction. It will only be applied if the architecture supports a
1923 /// BitExtract instruction. Here is an example:
1924 /// BB1:
1925 /// %x.extract.shift = lshr i64 %arg1, 32
1926 /// BB2:
1927 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16
1928 /// ==>
1929 ///
1930 /// BB2:
1931 /// %x.extract.shift.1 = lshr i64 %arg1, 32
1932 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1933 ///
1934 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
1935 /// instruction.
1936 /// Return true if any changes are made.
1937 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1938  const TargetLowering &TLI,
1939  const DataLayout &DL) {
1940  BasicBlock *DefBB = ShiftI->getParent();
1941 
1942  /// Only insert instructions in each block once.
1943  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1944 
1945  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1946 
1947  bool MadeChange = false;
1948  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1949  UI != E;) {
1950  Use &TheUse = UI.getUse();
1951  Instruction *User = cast<Instruction>(*UI);
1952  // Preincrement use iterator so we don't invalidate it.
1953  ++UI;
1954 
1955  // Don't bother for PHI nodes.
1956  if (isa<PHINode>(User))
1957  continue;
1958 
1959  if (!isExtractBitsCandidateUse(User))
1960  continue;
1961 
1962  BasicBlock *UserBB = User->getParent();
1963 
1964  if (UserBB == DefBB) {
1965  // If the shift and truncate instructions are in the same BB, the use of
1966  // the truncate (TruncUse) may still introduce another truncate if it is
1967  // not legal. In this case, we would like to sink both the shift and the
1968  // truncate instruction to the BB of TruncUse.
1969  // for example:
1970  // BB1:
1971  // i64 shift.result = lshr i64 opnd, imm
1972  // trunc.result = trunc shift.result to i16
1973  //
1974  // BB2:
1975  // ----> We will have an implicit truncate here if the architecture does
1976  // not have i16 compare.
1977  // cmp i16 trunc.result, opnd2
1978  //
1979  if (isa<TruncInst>(User) && shiftIsLegal
1980  // If the type of the truncate is legal, no truncate will be
1981  // introduced in other basic blocks.
1982  &&
1983  (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
1984  MadeChange =
1985  SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1986 
1987  continue;
1988  }
1989  // If we have already inserted a shift into this block, use it.
1990  BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1991 
1992  if (!InsertedShift) {
1993  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1994  assert(InsertPt != UserBB->end());
1995 
1996  if (ShiftI->getOpcode() == Instruction::AShr)
1997  InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1998  "", &*InsertPt);
1999  else
2000  InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
2001  "", &*InsertPt);
2002  InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2003 
2004  MadeChange = true;
2005  }
2006 
2007  // Replace a use of the shift with a use of the new shift.
2008  TheUse = InsertedShift;
2009  }
2010 
2011  // If we removed all uses, or there are none, nuke the shift.
2012  if (ShiftI->use_empty()) {
2013  salvageDebugInfo(*ShiftI);
2014  ShiftI->eraseFromParent();
2015  MadeChange = true;
2016  }
2017 
2018  return MadeChange;
2019 }
2020 
2021 /// If counting leading or trailing zeros is an expensive operation and a zero
2022 /// input is defined, add a check for zero to avoid calling the intrinsic.
2023 ///
2024 /// We want to transform:
2025 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2026 ///
2027 /// into:
2028 /// entry:
2029 /// %cmpz = icmp eq i64 %A, 0
2030 /// br i1 %cmpz, label %cond.end, label %cond.false
2031 /// cond.false:
2032 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2033 /// br label %cond.end
2034 /// cond.end:
2035 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2036 ///
2037 /// If the transform is performed, return true and set ModifiedDT to true.
2038 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2039  const TargetLowering *TLI,
2040  const DataLayout *DL,
2041  bool &ModifiedDT) {
2042  // If a zero input is undefined, it doesn't make sense to despeculate that.
2043  if (match(CountZeros->getOperand(1), m_One()))
2044  return false;
2045 
2046  // If it's cheap to speculate, there's nothing to do.
2047  auto IntrinsicID = CountZeros->getIntrinsicID();
2048  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
2049  (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
2050  return false;
2051 
2052  // Only handle legal scalar cases. Anything else requires too much work.
2053  Type *Ty = CountZeros->getType();
2054  unsigned SizeInBits = Ty->getScalarSizeInBits();
2055  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2056  return false;
2057 
2058  // Bail if the value is never zero.
2059  Use &Op = CountZeros->getOperandUse(0);
2060  if (isKnownNonZero(Op, *DL))
2061  return false;
2062 
2063  // The intrinsic will be sunk behind a compare against zero and branch.
2064  BasicBlock *StartBlock = CountZeros->getParent();
2065  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2066 
2067  // Create another block after the count zero intrinsic. A PHI will be added
2068  // in this block to select the result of the intrinsic or the bit-width
2069  // constant if the input to the intrinsic is zero.
2070  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
2071  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2072 
2073  // Set up a builder to create a compare, conditional branch, and PHI.
2074  IRBuilder<> Builder(CountZeros->getContext());
2075  Builder.SetInsertPoint(StartBlock->getTerminator());
2076  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2077 
2078  // Replace the unconditional branch that was created by the first split with
2079  // a compare against zero and a conditional branch.
2080  Value *Zero = Constant::getNullValue(Ty);
2081  // Avoid introducing branch on poison. This also replaces the ctz operand.
2082  if (!isGuaranteedNotToBeUndefOrPoison(Op))
2083  Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2084  Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2085  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2086  StartBlock->getTerminator()->eraseFromParent();
2087 
2088  // Create a PHI in the end block to select either the output of the intrinsic
2089  // or the bit width of the operand.
2090  Builder.SetInsertPoint(&EndBlock->front());
2091  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2092  CountZeros->replaceAllUsesWith(PN);
2093  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2094  PN->addIncoming(BitWidth, StartBlock);
2095  PN->addIncoming(CountZeros, CallBlock);
2096 
2097  // We are explicitly handling the zero case, so we can set the intrinsic's
2098  // undefined zero argument to 'true'. This will also prevent reprocessing the
2099  // intrinsic; we only despeculate when a zero input is defined.
2100  CountZeros->setArgOperand(1, Builder.getTrue());
2101  ModifiedDT = true;
2102  return true;
2103 }
2104 
2105 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
2106  BasicBlock *BB = CI->getParent();
2107 
2108  // Lower inline assembly if we can.
2109  // If we found an inline asm expression, and if the target knows how to
2110  // lower it to normal LLVM code, do so now.
2111  if (CI->isInlineAsm()) {
2112  if (TLI->ExpandInlineAsm(CI)) {
2113  // Avoid invalidating the iterator.
2114  CurInstIterator = BB->begin();
2115  // Avoid processing instructions out of order, which could cause
2116  // reuse before a value is defined.
2117  SunkAddrs.clear();
2118  return true;
2119  }
2120  // Sink address computing for memory operands into the block.
2121  if (optimizeInlineAsmInst(CI))
2122  return true;
2123  }
2124 
2125  // Align the pointer arguments to this call if the target thinks it's a good
2126  // idea
2127  unsigned MinSize;
2128  Align PrefAlign;
2129  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2130  for (auto &Arg : CI->args()) {
2131  // We want to align both objects whose address is used directly and
2132  // objects whose address is used in casts and GEPs, though it only makes
2133  // sense for GEPs if the offset is a multiple of the desired alignment and
2134  // if size - offset meets the size threshold.
2135  if (!Arg->getType()->isPointerTy())
2136  continue;
2137  APInt Offset(DL->getIndexSizeInBits(
2138  cast<PointerType>(Arg->getType())->getAddressSpace()),
2139  0);
2140  Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2141  uint64_t Offset2 = Offset.getLimitedValue();
2142  if (!isAligned(PrefAlign, Offset2))
2143  continue;
2144  AllocaInst *AI;
2145  if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2146  DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2147  AI->setAlignment(PrefAlign);
2148  // Global variables can only be aligned if they are defined in this
2149  // object (i.e. they are uniquely initialized in this object), and
2150  // over-aligning global variables that have an explicit section is
2151  // forbidden.
2152  GlobalVariable *GV;
2153  if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2154  GV->getPointerAlignment(*DL) < PrefAlign &&
2155  DL->getTypeAllocSize(GV->getValueType()) >=
2156  MinSize + Offset2)
2157  GV->setAlignment(PrefAlign);
2158  }
2159  // If this is a memcpy (or similar) then we may be able to improve the
2160  // alignment
2161  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2162  Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2163  MaybeAlign MIDestAlign = MI->getDestAlign();
2164  if (!MIDestAlign || DestAlign > *MIDestAlign)
2165  MI->setDestAlignment(DestAlign);
2166  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2167  MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2168  Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2169  if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2170  MTI->setSourceAlignment(SrcAlign);
2171  }
2172  }
2173  }
2174 
2175  // If we have a cold call site, try to sink addressing computation into the
2176  // cold block. This interacts with our handling for loads and stores to
2177  // ensure that we can fold all uses of a potential addressing computation
2178  // into their uses. TODO: generalize this to work over profiling data
2179  if (CI->hasFnAttr(Attribute::Cold) &&
2180  !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2181  for (auto &Arg : CI->args()) {
2182  if (!Arg->getType()->isPointerTy())
2183  continue;
2184  unsigned AS = Arg->getType()->getPointerAddressSpace();
2185  return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2186  }
2187 
2188  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2189  if (II) {
2190  switch (II->getIntrinsicID()) {
2191  default: break;
2192  case Intrinsic::assume:
2193  llvm_unreachable("llvm.assume should have been removed already");
2194  case Intrinsic::experimental_widenable_condition: {
2195  // Give up on future widening opportunities so that we can fold away dead
2196  // paths and merge blocks before going into block-local instruction
2197  // selection.
2198  if (II->use_empty()) {
2199  II->eraseFromParent();
2200  return true;
2201  }
2202  Constant *RetVal = ConstantInt::getTrue(II->getContext());
2203  resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2204  replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2205  });
2206  return true;
2207  }
2208  case Intrinsic::objectsize:
2209  llvm_unreachable("llvm.objectsize.* should have been lowered already");
2210  case Intrinsic::is_constant:
2211  llvm_unreachable("llvm.is.constant.* should have been lowered already");
2212  case Intrinsic::aarch64_stlxr:
2213  case Intrinsic::aarch64_stxr: {
2214  ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2215  if (!ExtVal || !ExtVal->hasOneUse() ||
2216  ExtVal->getParent() == CI->getParent())
2217  return false;
2218  // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2219  ExtVal->moveBefore(CI);
2220  // Mark this instruction as "inserted by CGP", so that other
2221  // optimizations don't touch it.
2222  InsertedInsts.insert(ExtVal);
2223  return true;
2224  }
2225 
2226  case Intrinsic::launder_invariant_group:
2227  case Intrinsic::strip_invariant_group: {
2228  Value *ArgVal = II->getArgOperand(0);
2229  auto it = LargeOffsetGEPMap.find(II);
2230  if (it != LargeOffsetGEPMap.end()) {
2231  // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2232  // Make sure not to have to deal with iterator invalidation
2233  // after possibly adding ArgVal to LargeOffsetGEPMap.
2234  auto GEPs = std::move(it->second);
2235  LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2236  LargeOffsetGEPMap.erase(II);
2237  }
2238 
2239  II->replaceAllUsesWith(ArgVal);
2240  II->eraseFromParent();
2241  return true;
2242  }
2243  case Intrinsic::cttz:
2244  case Intrinsic::ctlz:
2245  // If counting zeros is expensive, try to avoid it.
2246  return despeculateCountZeros(II, TLI, DL, ModifiedDT);
2247  case Intrinsic::fshl:
2248  case Intrinsic::fshr:
2249  return optimizeFunnelShift(II);
2250  case Intrinsic::dbg_value:
2251  return fixupDbgValue(II);
2252  case Intrinsic::vscale: {
2253  // If datalayout has no special restrictions on vector data layout,
2254  // replace `llvm.vscale` by an equivalent constant expression
2255  // to benefit from cheap constant propagation.
2256  Type *ScalableVectorTy =
2257  VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2258  if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
2259  auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2260  auto *One = ConstantInt::getSigned(II->getType(), 1);
2261  auto *CGep =
2262  ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2263  II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType()));
2264  II->eraseFromParent();
2265  return true;
2266  }
2267  break;
2268  }
2269  case Intrinsic::masked_gather:
2270  return optimizeGatherScatterInst(II, II->getArgOperand(0));
2271  case Intrinsic::masked_scatter:
2272  return optimizeGatherScatterInst(II, II->getArgOperand(1));
2273  }
2274 
2275  SmallVector<Value *, 2> PtrOps;
2276  Type *AccessTy;
2277  if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2278  while (!PtrOps.empty()) {
2279  Value *PtrVal = PtrOps.pop_back_val();
2280  unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2281  if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2282  return true;
2283  }
2284  }
2285 
2286  // From here on out we're working with named functions.
2287  if (!CI->getCalledFunction()) return false;
2288 
2289  // Lower all default uses of _chk calls. This is very similar
2290  // to what InstCombineCalls does, but here we are only lowering calls
2291  // to fortified library functions (e.g. __memcpy_chk) that have the default
2292  // "don't know" as the objectsize. Anything else should be left alone.
2293  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2294  IRBuilder<> Builder(CI);
2295  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2296  CI->replaceAllUsesWith(V);
2297  CI->eraseFromParent();
2298  return true;
2299  }
2300 
2301  return false;
2302 }
2303 
2304 /// Look for opportunities to duplicate return instructions to the predecessor
2305 /// to enable tail call optimizations. The case it is currently looking for is:
2306 /// @code
2307 /// bb0:
2308 /// %tmp0 = tail call i32 @f0()
2309 /// br label %return
2310 /// bb1:
2311 /// %tmp1 = tail call i32 @f1()
2312 /// br label %return
2313 /// bb2:
2314 /// %tmp2 = tail call i32 @f2()
2315 /// br label %return
2316 /// return:
2317 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2318 /// ret i32 %retval
2319 /// @endcode
2320 ///
2321 /// =>
2322 ///
2323 /// @code
2324 /// bb0:
2325 /// %tmp0 = tail call i32 @f0()
2326 /// ret i32 %tmp0
2327 /// bb1:
2328 /// %tmp1 = tail call i32 @f1()
2329 /// ret i32 %tmp1
2330 /// bb2:
2331 /// %tmp2 = tail call i32 @f2()
2332 /// ret i32 %tmp2
2333 /// @endcode
2334 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
2335  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2336  if (!RetI)
2337  return false;
2338 
2339  PHINode *PN = nullptr;
2340  ExtractValueInst *EVI = nullptr;
2341  BitCastInst *BCI = nullptr;
2342  Value *V = RetI->getReturnValue();
2343  if (V) {
2344  BCI = dyn_cast<BitCastInst>(V);
2345  if (BCI)
2346  V = BCI->getOperand(0);
2347 
2348  EVI = dyn_cast<ExtractValueInst>(V);
2349  if (EVI) {
2350  V = EVI->getOperand(0);
2351  if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2352  return false;
2353  }
2354 
2355  PN = dyn_cast<PHINode>(V);
2356  if (!PN)
2357  return false;
2358  }
2359 
2360  if (PN && PN->getParent() != BB)
2361  return false;
2362 
2363  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2364  const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2365  if (BC && BC->hasOneUse())
2366  Inst = BC->user_back();
2367 
2368  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2369  return II->getIntrinsicID() == Intrinsic::lifetime_end;
2370  return false;
2371  };
2372 
2373  // Make sure there are no instructions between the first instruction
2374  // and return.
2375  const Instruction *BI = BB->getFirstNonPHI();
2376  // Skip over debug and the bitcast.
2377  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2378  isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2379  BI = BI->getNextNode();
2380  if (BI != RetI)
2381  return false;
2382 
2383  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2384  /// call.
2385  const Function *F = BB->getParent();
2386  SmallVector<BasicBlock*, 4> TailCallBBs;
2387  if (PN) {
2388  for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2389  // Look through bitcasts.
2390  Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2391  CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2392  BasicBlock *PredBB = PN->getIncomingBlock(I);
2393  // Make sure the phi value is indeed produced by the tail call.
2394  if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2395  TLI->mayBeEmittedAsTailCall(CI) &&
2396  attributesPermitTailCall(F, CI, RetI, *TLI))
2397  TailCallBBs.push_back(PredBB);
2398  }
2399  } else {
2400  SmallPtrSet<BasicBlock*, 4> VisitedBBs;
2401  for (BasicBlock *Pred : predecessors(BB)) {
2402  if (!VisitedBBs.insert(Pred).second)
2403  continue;
2404  if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2405  CallInst *CI = dyn_cast<CallInst>(I);
2406  if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2407  attributesPermitTailCall(F, CI, RetI, *TLI))
2408  TailCallBBs.push_back(Pred);
2409  }
2410  }
2411  }
2412 
2413  bool Changed = false;
2414  for (auto const &TailCallBB : TailCallBBs) {
2415  // Make sure the call instruction is followed by an unconditional branch to
2416  // the return block.
2417  BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2418  if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2419  continue;
2420 
2421  // Duplicate the return into TailCallBB.
2422  (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2423  assert(!VerifyBFIUpdates ||
2424  BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2425  BFI->setBlockFreq(
2426  BB,
2427  (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2428  ModifiedDT = Changed = true;
2429  ++NumRetsDup;
2430  }
2431 
2432  // If we eliminated all predecessors of the block, delete the block now.
2433  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2434  BB->eraseFromParent();
2435 
2436  return Changed;
2437 }
2438 
2439 //===----------------------------------------------------------------------===//
2440 // Memory Optimization
2441 //===----------------------------------------------------------------------===//
2442 
2443 namespace {
2444 
2445 /// This is an extended version of TargetLowering::AddrMode
2446 /// which holds actual Value*'s for register values.
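///
/// An address is modeled as BaseGV + BaseReg + BaseOffs + Scale * ScaledReg.
/// As an illustrative sketch (names are hypothetical), the address feeding
///   %p = getelementptr i32, i32* %base, i64 %idx
///   %v = load i32, i32* %p
/// could be matched as BaseReg = %base, ScaledReg = %idx, Scale = 4.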
2447 struct ExtAddrMode : public TargetLowering::AddrMode {
2448  Value *BaseReg = nullptr;
2449  Value *ScaledReg = nullptr;
2450  Value *OriginalValue = nullptr;
2451  bool InBounds = true;
2452 
2453  enum FieldName {
2454  NoField = 0x00,
2455  BaseRegField = 0x01,
2456  BaseGVField = 0x02,
2457  BaseOffsField = 0x04,
2458  ScaledRegField = 0x08,
2459  ScaleField = 0x10,
2460  MultipleFields = 0xff
2461  };
2462 
2463 
2464  ExtAddrMode() = default;
2465 
2466  void print(raw_ostream &OS) const;
2467  void dump() const;
2468 
2469  FieldName compare(const ExtAddrMode &other) {
2470  // First check that the types are the same on each field, as differing types
2471  // are something we can't cope with later on.
2472  if (BaseReg && other.BaseReg &&
2473  BaseReg->getType() != other.BaseReg->getType())
2474  return MultipleFields;
2475  if (BaseGV && other.BaseGV &&
2476  BaseGV->getType() != other.BaseGV->getType())
2477  return MultipleFields;
2478  if (ScaledReg && other.ScaledReg &&
2479  ScaledReg->getType() != other.ScaledReg->getType())
2480  return MultipleFields;
2481 
2482  // Conservatively reject 'inbounds' mismatches.
2483  if (InBounds != other.InBounds)
2484  return MultipleFields;
2485 
2486  // Check each field to see if it differs.
2487  unsigned Result = NoField;
2488  if (BaseReg != other.BaseReg)
2489  Result |= BaseRegField;
2490  if (BaseGV != other.BaseGV)
2491  Result |= BaseGVField;
2492  if (BaseOffs != other.BaseOffs)
2493  Result |= BaseOffsField;
2494  if (ScaledReg != other.ScaledReg)
2495  Result |= ScaledRegField;
2496  // Don't count 0 as being a different scale, because that actually means
2497  // unscaled (which will already be counted by having no ScaledReg).
2498  if (Scale && other.Scale && Scale != other.Scale)
2499  Result |= ScaleField;
2500 
2501  if (countPopulation(Result) > 1)
2502  return MultipleFields;
2503  else
2504  return static_cast<FieldName>(Result);
2505  }
2506 
2507  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2508  // with no offset.
2509  bool isTrivial() {
2510  // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2511  // trivial if at most one of these terms is nonzero, except that BaseGV and
2512  // BaseReg both being zero actually means a null pointer value, which we
2513  // consider to be 'non-zero' here.
2514  return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2515  }
2516 
2517  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2518  switch (Field) {
2519  default:
2520  return nullptr;
2521  case BaseRegField:
2522  return BaseReg;
2523  case BaseGVField:
2524  return BaseGV;
2525  case ScaledRegField:
2526  return ScaledReg;
2527  case BaseOffsField:
2528  return ConstantInt::get(IntPtrTy, BaseOffs);
2529  }
2530  }
2531 
2532  void SetCombinedField(FieldName Field, Value *V,
2533  const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2534  switch (Field) {
2535  default:
2536  llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2537  break;
2538  case ExtAddrMode::BaseRegField:
2539  BaseReg = V;
2540  break;
2541  case ExtAddrMode::BaseGVField:
2542  // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2543  // in the BaseReg field.
2544  assert(BaseReg == nullptr);
2545  BaseReg = V;
2546  BaseGV = nullptr;
2547  break;
2548  case ExtAddrMode::ScaledRegField:
2549  ScaledReg = V;
2550  // If we have a mix of scaled and unscaled addrmodes then we want scale
2551  // to be the scale and not zero.
2552  if (!Scale)
2553  for (const ExtAddrMode &AM : AddrModes)
2554  if (AM.Scale) {
2555  Scale = AM.Scale;
2556  break;
2557  }
2558  break;
2559  case ExtAddrMode::BaseOffsField:
2560  // The offset is no longer a constant, so it goes in ScaledReg with a
2561  // scale of 1.
2562  assert(ScaledReg == nullptr);
2563  ScaledReg = V;
2564  Scale = 1;
2565  BaseOffs = 0;
2566  break;
2567  }
2568  }
2569 };
2570 
2571 } // end anonymous namespace
2572 
2573 #ifndef NDEBUG
2574 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2575  AM.print(OS);
2576  return OS;
2577 }
2578 #endif
2579 
2580 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2581 void ExtAddrMode::print(raw_ostream &OS) const {
2582  bool NeedPlus = false;
2583  OS << "[";
2584  if (InBounds)
2585  OS << "inbounds ";
2586  if (BaseGV) {
2587  OS << (NeedPlus ? " + " : "")
2588  << "GV:";
2589  BaseGV->printAsOperand(OS, /*PrintType=*/false);
2590  NeedPlus = true;
2591  }
2592 
2593  if (BaseOffs) {
2594  OS << (NeedPlus ? " + " : "")
2595  << BaseOffs;
2596  NeedPlus = true;
2597  }
2598 
2599  if (BaseReg) {
2600  OS << (NeedPlus ? " + " : "")
2601  << "Base:";
2602  BaseReg->printAsOperand(OS, /*PrintType=*/false);
2603  NeedPlus = true;
2604  }
2605  if (Scale) {
2606  OS << (NeedPlus ? " + " : "")
2607  << Scale << "*";
2608  ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2609  }
2610 
2611  OS << ']';
2612 }
2613 
2614 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2615  print(dbgs());
2616  dbgs() << '\n';
2617 }
2618 #endif
2619 
2620 namespace {
2621 
2622 /// This class provides transaction based operation on the IR.
2623 /// Every change made through this class is recorded in the internal state and
2624 /// can be undone (rollback) until commit is called.
2625 /// CGP does not check if instructions could be speculatively executed when
2626 /// moved. Preserving the original location would pessimize the debugging
2627 /// experience, as well as negatively impact the quality of sample PGO.
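///
/// A minimal usage sketch (illustrative; 'I', 'NewTy' and 'Profitable' are
/// hypothetical):
///   TypePromotionTransaction TPT(RemovedInsts);
///   auto Pt = TPT.getRestorationPoint();
///   TPT.mutateType(I, NewTy);     // speculative change, recorded as an action
///   if (Profitable)
///     TPT.commit();               // keep all recorded changes
///   else
///     TPT.rollback(Pt);           // undo every action made after Pt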
2628 class TypePromotionTransaction {
2629  /// This represents the common interface of the individual transaction.
2630  /// Each class implements the logic for doing one specific modification on
2631  /// the IR via the TypePromotionTransaction.
2632  class TypePromotionAction {
2633  protected:
2634  /// The Instruction modified.
2635  Instruction *Inst;
2636 
2637  public:
2638  /// Constructor of the action.
2639  /// The constructor performs the related action on the IR.
2640  TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2641 
2642  virtual ~TypePromotionAction() = default;
2643 
2644  /// Undo the modification done by this action.
2645  /// When this method is called, the IR must be in the same state as it was
2646  /// before this action was applied.
2647  /// \pre Undoing the action works if and only if the IR is in the exact same
2648  /// state as it was directly after this action was applied.
2649  virtual void undo() = 0;
2650 
2651  /// Advocate every change made by this action.
2652  /// When the results on the IR of the action are to be kept, it is important
2653  /// to call this function, otherwise hidden information may be kept forever.
2654  virtual void commit() {
2655  // Nothing to be done, this action is not doing anything.
2656  }
2657  };
2658 
2659  /// Utility to remember the position of an instruction.
2660  class InsertionHandler {
2661  /// Position of an instruction.
2662  /// Either an instruction:
2663  /// - Is the first in a basic block: BB is used.
2664  /// - Has a previous instruction: PrevInst is used.
2665  union {
2666  Instruction *PrevInst;
2667  BasicBlock *BB;
2668  } Point;
2669 
2670  /// Remember whether or not the instruction had a previous instruction.
2671  bool HasPrevInstruction;
2672 
2673  public:
2674  /// Record the position of \p Inst.
2675  InsertionHandler(Instruction *Inst) {
2676  BasicBlock::iterator It = Inst->getIterator();
2677  HasPrevInstruction = (It != (Inst->getParent()->begin()));
2678  if (HasPrevInstruction)
2679  Point.PrevInst = &*--It;
2680  else
2681  Point.BB = Inst->getParent();
2682  }
2683 
2684  /// Insert \p Inst at the recorded position.
2685  void insert(Instruction *Inst) {
2686  if (HasPrevInstruction) {
2687  if (Inst->getParent())
2688  Inst->removeFromParent();
2689  Inst->insertAfter(Point.PrevInst);
2690  } else {
2691  Instruction *Position = &*Point.BB->getFirstInsertionPt();
2692  if (Inst->getParent())
2693  Inst->moveBefore(Position);
2694  else
2695  Inst->insertBefore(Position);
2696  }
2697  }
2698  };
2699 
2700  /// Move an instruction before another.
2701  class InstructionMoveBefore : public TypePromotionAction {
2702  /// Original position of the instruction.
2703  InsertionHandler Position;
2704 
2705  public:
2706  /// Move \p Inst before \p Before.
2707  InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2708  : TypePromotionAction(Inst), Position(Inst) {
2709  LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2710  << "\n");
2711  Inst->moveBefore(Before);
2712  }
2713 
2714  /// Move the instruction back to its original position.
2715  void undo() override {
2716  LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2717  Position.insert(Inst);
2718  }
2719  };
2720 
2721  /// Set the operand of an instruction with a new value.
2722  class OperandSetter : public TypePromotionAction {
2723  /// Original operand of the instruction.
2724  Value *Origin;
2725 
2726  /// Index of the modified instruction.
2727  unsigned Idx;
2728 
2729  public:
2730  /// Set \p Idx operand of \p Inst with \p NewVal.
2731  OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2732  : TypePromotionAction(Inst), Idx(Idx) {
2733  LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2734  << "for:" << *Inst << "\n"
2735  << "with:" << *NewVal << "\n");
2736  Origin = Inst->getOperand(Idx);
2737  Inst->setOperand(Idx, NewVal);
2738  }
2739 
2740  /// Restore the original value of the instruction.
2741  void undo() override {
2742  LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2743  << "for: " << *Inst << "\n"
2744  << "with: " << *Origin << "\n");
2745  Inst->setOperand(Idx, Origin);
2746  }
2747  };
2748 
2749  /// Hide the operands of an instruction.
2750  /// Do as if this instruction was not using any of its operands.
2751  class OperandsHider : public TypePromotionAction {
2752  /// The list of original operands.
2753  SmallVector<Value *, 4> OriginalValues;
2754 
2755  public:
2756  /// Remove \p Inst from the uses of the operands of \p Inst.
2757  OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2758  LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2759  unsigned NumOpnds = Inst->getNumOperands();
2760  OriginalValues.reserve(NumOpnds);
2761  for (unsigned It = 0; It < NumOpnds; ++It) {
2762  // Save the current operand.
2763  Value *Val = Inst->getOperand(It);
2764  OriginalValues.push_back(Val);
2765  // Set a dummy one.
2766  // We could use OperandSetter here, but that would imply an overhead
2767  // that we are not willing to pay.
2768  Inst->setOperand(It, UndefValue::get(Val->getType()));
2769  }
2770  }
2771 
2772  /// Restore the original list of uses.
2773  void undo() override {
2774  LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2775  for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2776  Inst->setOperand(It, OriginalValues[It]);
2777  }
2778  };
2779 
2780  /// Build a truncate instruction.
2781  class TruncBuilder : public TypePromotionAction {
2782  Value *Val;
2783 
2784  public:
2785  /// Build a truncate instruction of \p Opnd producing a \p Ty
2786  /// result.
2787  /// trunc Opnd to Ty.
2788  TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2789  IRBuilder<> Builder(Opnd);
2790  Builder.SetCurrentDebugLocation(DebugLoc());
2791  Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2792  LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2793  }
2794 
2795  /// Get the built value.
2796  Value *getBuiltValue() { return Val; }
2797 
2798  /// Remove the built instruction.
2799  void undo() override {
2800  LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2801  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2802  IVal->eraseFromParent();
2803  }
2804  };
2805 
2806  /// Build a sign extension instruction.
2807  class SExtBuilder : public TypePromotionAction {
2808  Value *Val;
2809 
2810  public:
2811  /// Build a sign extension instruction of \p Opnd producing a \p Ty
2812  /// result.
2813  /// sext Opnd to Ty.
2814  SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2815  : TypePromotionAction(InsertPt) {
2816  IRBuilder<> Builder(InsertPt);
2817  Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2818  LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2819  }
2820 
2821  /// Get the built value.
2822  Value *getBuiltValue() { return Val; }
2823 
2824  /// Remove the built instruction.
2825  void undo() override {
2826  LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2827  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2828  IVal->eraseFromParent();
2829  }
2830  };
2831 
2832  /// Build a zero extension instruction.
2833  class ZExtBuilder : public TypePromotionAction {
2834  Value *Val;
2835 
2836  public:
2837  /// Build a zero extension instruction of \p Opnd producing a \p Ty
2838  /// result.
2839  /// zext Opnd to Ty.
2840  ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2841  : TypePromotionAction(InsertPt) {
2842  IRBuilder<> Builder(InsertPt);
2843  Builder.SetCurrentDebugLocation(DebugLoc());
2844  Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2845  LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2846  }
2847 
2848  /// Get the built value.
2849  Value *getBuiltValue() { return Val; }
2850 
2851  /// Remove the built instruction.
2852  void undo() override {
2853  LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2854  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2855  IVal->eraseFromParent();
2856  }
2857  };
2858 
2859  /// Mutate an instruction to another type.
2860  class TypeMutator : public TypePromotionAction {
2861  /// Record the original type.
2862  Type *OrigTy;
2863 
2864  public:
2865  /// Mutate the type of \p Inst into \p NewTy.
2866  TypeMutator(Instruction *Inst, Type *NewTy)
2867  : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2868  LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2869  << "\n");
2870  Inst->mutateType(NewTy);
2871  }
2872 
2873  /// Mutate the instruction back to its original type.
2874  void undo() override {
2875  LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2876  << "\n");
2877  Inst->mutateType(OrigTy);
2878  }
2879  };
2880 
2881  /// Replace the uses of an instruction by another instruction.
2882  class UsesReplacer : public TypePromotionAction {
2883  /// Helper structure to keep track of the replaced uses.
2884  struct InstructionAndIdx {
2885  /// The instruction using the instruction.
2886  Instruction *Inst;
2887 
2888  /// The index where this instruction is used for Inst.
2889  unsigned Idx;
2890 
2891  InstructionAndIdx(Instruction *Inst, unsigned Idx)
2892  : Inst(Inst), Idx(Idx) {}
2893  };
2894 
2895  /// Keep track of the original uses (pair Instruction, Index).
2896  SmallVector<InstructionAndIdx, 4> OriginalUses;
2897  /// Keep track of the debug users.
2898  SmallVector<DbgValueInst *, 1> DbgValues;
2899 
2900  /// Keep track of the new value so that we can undo it by replacing
2901  /// instances of the new value with the original value.
2902  Value *New;
2903 
2904  using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
2905 
2906  public:
2907  /// Replace all the use of \p Inst by \p New.
2908  UsesReplacer(Instruction *Inst, Value *New)
2909  : TypePromotionAction(Inst), New(New) {
2910  LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2911  << "\n");
2912  // Record the original uses.
2913  for (Use &U : Inst->uses()) {
2914  Instruction *UserI = cast<Instruction>(U.getUser());
2915  OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
2916  }
2917  // Record the debug uses separately. They are not in the instruction's
2918  // use list, but they are replaced by RAUW.
2919  findDbgValues(DbgValues, Inst);
2920 
2921  // Now, we can replace the uses.
2922  Inst->replaceAllUsesWith(New);
2923  }
2924 
2925  /// Reassign the original uses of Inst to Inst.
2926  void undo() override {
2927  LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2928  for (InstructionAndIdx &Use : OriginalUses)
2929  Use.Inst->setOperand(Use.Idx, Inst);
2930  // RAUW has replaced all original uses with references to the new value,
2931  // including the debug uses. Since we are undoing the replacements,
2932  // the original debug uses must also be reinstated to maintain the
2933  // correctness and utility of debug value instructions.
2934  for (auto *DVI : DbgValues)
2935  DVI->replaceVariableLocationOp(New, Inst);
2936  }
2937  };
2938 
2939  /// Remove an instruction from the IR.
2940  class InstructionRemover : public TypePromotionAction {
2941  /// Original position of the instruction.
2942  InsertionHandler Inserter;
2943 
2944  /// Helper structure to hide all the links to the instruction. In other
2945  /// words, this helps to do as if the instruction was removed.
2946  OperandsHider Hider;
2947 
2948  /// Keep track of the uses replaced, if any.
2949  UsesReplacer *Replacer = nullptr;
2950 
2951  /// Keep track of instructions removed.
2952  SetOfInstrs &RemovedInsts;
2953 
2954  public:
2955  /// Remove all references to \p Inst and optionally replace all its
2956  /// uses with New.
2957  /// \p RemovedInsts Keep track of the instructions removed by this Action.
2958  /// \pre If !Inst->use_empty(), then New != nullptr
2959  InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
2960  Value *New = nullptr)
2961  : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
2962  RemovedInsts(RemovedInsts) {
2963  if (New)
2964  Replacer = new UsesReplacer(Inst, New);
2965  LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2966  RemovedInsts.insert(Inst);
2967  /// The instructions removed here will be freed after completing
2968  /// optimizeBlock() for all blocks as we need to keep track of the
2969  /// removed instructions during promotion.
2970  Inst->removeFromParent();
2971  }
2972 
2973  ~InstructionRemover() override { delete Replacer; }
2974 
2975  /// Resurrect the instruction and reassign it to the proper uses if
2976  /// a new value was provided when building this action.
2977  void undo() override {
2978  LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2979  Inserter.insert(Inst);
2980  if (Replacer)
2981  Replacer->undo();
2982  Hider.undo();
2983  RemovedInsts.erase(Inst);
2984  }
2985  };
2986 
2987 public:
2988  /// Restoration point.
2989  /// The restoration point is a pointer to an action instead of an iterator
2990  /// because the iterator may be invalidated but not the pointer.
2991  using ConstRestorationPt = const TypePromotionAction *;
2992 
2993  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
2994  : RemovedInsts(RemovedInsts) {}
2995 
2996  /// Advocate every change made in that transaction. Return true if any change
2997  /// happened.
2998  bool commit();
2999 
3000  /// Undo all the changes made after the given point.
3001  void rollback(ConstRestorationPt Point);
3002 
3003  /// Get the current restoration point.
3004  ConstRestorationPt getRestorationPoint() const;
3005 
3006  /// \name API for IR modification with state keeping to support rollback.
3007  /// @{
3008  /// Same as Instruction::setOperand.
3009  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3010 
3011  /// Same as Instruction::eraseFromParent.
3012  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3013 
3014  /// Same as Value::replaceAllUsesWith.
3015  void replaceAllUsesWith(Instruction *Inst, Value *New);
3016 
3017  /// Same as Value::mutateType.
3018  void mutateType(Instruction *Inst, Type *NewTy);
3019 
3020  /// Same as IRBuilder::createTrunc.
3021  Value *createTrunc(Instruction *Opnd, Type *Ty);
3022 
3023  /// Same as IRBuilder::createSExt.
3024  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3025 
3026  /// Same as IRBuilder::createZExt.
3027  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3028 
3029  /// Same as Instruction::moveBefore.
3030  void moveBefore(Instruction *Inst, Instruction *Before);
3031  /// @}
3032 
3033 private:
3034  /// The ordered list of actions made so far.
3035  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3036 
3037  using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3038 
3039  SetOfInstrs &RemovedInsts;
3040 };
3041 
3042 } // end anonymous namespace
3043 
3044 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3045  Value *NewVal) {
3046  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3047  Inst, Idx, NewVal));
3048 }
3049 
3050 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3051  Value *NewVal) {
3052  Actions.push_back(
3053  std::make_unique<TypePromotionTransaction::InstructionRemover>(
3054  Inst, RemovedInsts, NewVal));
3055 }
3056 
3057 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3058  Value *New) {
3059  Actions.push_back(
3060  std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3061 }
3062 
3063 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3064  Actions.push_back(
3065  std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3066 }
3067 
3068 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
3069  Type *Ty) {
3070  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3071  Value *Val = Ptr->getBuiltValue();
3072  Actions.push_back(std::move(Ptr));
3073  return Val;
3074 }
3075 
3076 Value *TypePromotionTransaction::createSExt(Instruction *Inst,
3077  Value *Opnd, Type *Ty) {
3078  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3079  Value *Val = Ptr->getBuiltValue();
3080  Actions.push_back(std::move(Ptr));
3081  return Val;
3082 }
3083 
3084 Value *TypePromotionTransaction::createZExt(Instruction *Inst,
3085  Value *Opnd, Type *Ty) {
3086  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3087  Value *Val = Ptr->getBuiltValue();
3088  Actions.push_back(std::move(Ptr));
3089  return Val;
3090 }
3091 
3092 void TypePromotionTransaction::moveBefore(Instruction *Inst,
3093  Instruction *Before) {
3094  Actions.push_back(
3095  std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
3096  Inst, Before));
3097 }
3098 
3099 TypePromotionTransaction::ConstRestorationPt
3100 TypePromotionTransaction::getRestorationPoint() const {
3101  return !Actions.empty() ? Actions.back().get() : nullptr;
3102 }
3103 
3104 bool TypePromotionTransaction::commit() {
3105  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3106  Action->commit();
3107  bool Modified = !Actions.empty();
3108  Actions.clear();
3109  return Modified;
3110 }
3111 
3112 void TypePromotionTransaction::rollback(
3113  TypePromotionTransaction::ConstRestorationPt Point) {
3114  while (!Actions.empty() && Point != Actions.back().get()) {
3115  std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3116  Curr->undo();
3117  }
3118 }
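// A minimal usage sketch of the transaction API above (illustrative only, not
// part of the original pass; ProfitabilityCheck is a hypothetical predicate):
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   TPT.mutateType(Inst, WiderTy);       // speculative IR changes
//   TPT.replaceAllUsesWith(Ext, Inst);
//   if (!ProfitabilityCheck())
//     TPT.rollback(LastKnownGood);       // undo back to the checkpoint
//   else
//     TPT.commit();                      // make the changes permanent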
3119 
3120 namespace {
3121 
3122 /// A helper class for matching addressing modes.
3123 ///
3124 /// This encapsulates the logic for matching the target-legal addressing modes.
3125 class AddressingModeMatcher {
3126  SmallVectorImpl<Instruction*> &AddrModeInsts;
3127  const TargetLowering &TLI;
3128  const TargetRegisterInfo &TRI;
3129  const DataLayout &DL;
3130  const LoopInfo &LI;
3131  const std::function<const DominatorTree &()> getDTFn;
3132 
3133  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3134  /// the memory instruction that we're computing this address for.
3135  Type *AccessTy;
3136  unsigned AddrSpace;
3137  Instruction *MemoryInst;
3138 
3139  /// This is the addressing mode that we're building up. This is
3140  /// part of the return value of this addressing mode matching stuff.
3141  ExtAddrMode &AddrMode;
3142 
3143  /// The instructions inserted by other CodeGenPrepare optimizations.
3144  const SetOfInstrs &InsertedInsts;
3145 
3146  /// A map from the instructions to their type before promotion.
3147  InstrToOrigTy &PromotedInsts;
3148 
3149  /// The ongoing transaction where every action should be registered.
3150  TypePromotionTransaction &TPT;
3151 
3152  // A GEP whose offset is too large to be folded into the addressing mode.
3153  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3154 
3155  /// This is set to true when we should not do profitability checks.
3156  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3157  bool IgnoreProfitability;
3158 
3159  /// True if we are optimizing for size.
3160  bool OptSize;
3161 
3162  ProfileSummaryInfo *PSI;
3163  BlockFrequencyInfo *BFI;
3164 
3165  AddressingModeMatcher(
3166      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3167  const TargetRegisterInfo &TRI, const LoopInfo &LI,
3168  const std::function<const DominatorTree &()> getDTFn,
3169  Type *AT, unsigned AS, Instruction *MI, ExtAddrMode &AM,
3170  const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3171  TypePromotionTransaction &TPT,
3172  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3173  bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3174  : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3175  DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn),
3176  AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3177  InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3178  LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3179  IgnoreProfitability = false;
3180  }
3181 
3182 public:
3183  /// Find the maximal addressing mode that a load/store of V can fold,
3184  /// given an access type of AccessTy. This returns a list of involved
3185  /// instructions in AddrModeInsts.
3186  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3187  /// optimizations.
3188  /// \p PromotedInsts maps the instructions to their type before promotion.
3189  /// \p TPT The ongoing transaction where every action should be registered.
3190  static ExtAddrMode
3191  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3192  SmallVectorImpl<Instruction *> &AddrModeInsts,
3193  const TargetLowering &TLI, const LoopInfo &LI,
3194  const std::function<const DominatorTree &()> getDTFn,
3195  const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3196  InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3197  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3198  bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3199  ExtAddrMode Result;
3200 
3201  bool Success = AddressingModeMatcher(
3202  AddrModeInsts, TLI, TRI, LI, getDTFn, AccessTy, AS, MemoryInst, Result,
3203  InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
3204  BFI).matchAddr(V, 0);
3205  (void)Success; assert(Success && "Couldn't select *anything*?");
3206  return Result;
3207  }
3208 
3209 private:
3210  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3211  bool matchAddr(Value *Addr, unsigned Depth);
3212  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3213  bool *MovedAway = nullptr);
3214  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3215  ExtAddrMode &AMBefore,
3216  ExtAddrMode &AMAfter);
3217  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3218  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3219  Value *PromotedOperand) const;
3220 };
3221 
3222 class PhiNodeSet;
3223 
3224 /// An iterator for PhiNodeSet.
3225 class PhiNodeSetIterator {
3226  PhiNodeSet * const Set;
3227  size_t CurrentIndex = 0;
3228 
3229 public:
3230  /// The constructor. Start should point to either a valid element, or be equal
3231  /// to the size of the underlying SmallVector of the PhiNodeSet.
3232  PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start);
3233  PHINode * operator*() const;
3234  PhiNodeSetIterator& operator++();
3235  bool operator==(const PhiNodeSetIterator &RHS) const;
3236  bool operator!=(const PhiNodeSetIterator &RHS) const;
3237 };
3238 
3239 /// Keeps a set of PHINodes.
3240 ///
3241 /// This is a minimal set implementation for a specific use case:
3242 /// It is very fast when there are very few elements, but also provides good
3243 /// performance when there are many. It is similar to SmallPtrSet, but also
3244 /// provides iteration by insertion order, which is deterministic and stable
3245 /// across runs. It is also similar to SmallSetVector, but supports removing
3246 /// elements in O(1) time. This is achieved by never actually removing an
3247 /// element from the underlying vector, which costs some extra memory, but
3248 /// that is fine, since PhiNodeSets are used as short-lived objects.
3249 class PhiNodeSet {
3250  friend class PhiNodeSetIterator;
3251 
3252  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3253  using iterator = PhiNodeSetIterator;
3254 
3255  /// Keeps the elements in the order of their insertion in the underlying
3256  /// vector. To achieve constant time removal, it never deletes any element.
3257  SmallVector<PHINode *, 32> NodeList;
3258 
3259  /// Keeps the elements in the underlying set implementation. This (and not the
3260  /// NodeList defined above) is the source of truth on whether an element
3261  /// is actually in the collection.
3262  MapType NodeMap;
3263 
3264  /// Points to the first valid (not deleted) element when the set is not empty
3265  /// and the value is not zero. It equals the size of the underlying vector
3266  /// when the set is empty. When the value is 0, as in the beginning, the
3267  /// first element may or may not be valid.
3268  size_t FirstValidElement = 0;
3269 
3270 public:
3271  /// Inserts a new element to the collection.
3272  /// \returns true if the element is actually added, i.e. was not in the
3273  /// collection before the operation.
3274  bool insert(PHINode *Ptr) {
3275  if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3276  NodeList.push_back(Ptr);
3277  return true;
3278  }
3279  return false;
3280  }
3281 
3282  /// Removes the element from the collection.
3283  /// \returns whether the element is actually removed, i.e. was in the
3284  /// collection before the operation.
3285  bool erase(PHINode *Ptr) {
3286  if (NodeMap.erase(Ptr)) {
3287  SkipRemovedElements(FirstValidElement);
3288  return true;
3289  }
3290  return false;
3291  }
3292 
3293  /// Removes all elements and clears the collection.
3294  void clear() {
3295  NodeMap.clear();
3296  NodeList.clear();
3297  FirstValidElement = 0;
3298  }
3299 
3300  /// \returns an iterator that will iterate the elements in the order of
3301  /// insertion.
3302  iterator begin() {
3303  if (FirstValidElement == 0)
3304  SkipRemovedElements(FirstValidElement);
3305  return PhiNodeSetIterator(this, FirstValidElement);
3306  }
3307 
3308  /// \returns an iterator that points to the end of the collection.
3309  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3310 
3311  /// Returns the number of elements in the collection.
3312  size_t size() const {
3313  return NodeMap.size();
3314  }
3315 
3316  /// \returns 1 if the given element is in the collection, and 0 otherwise.
3317  size_t count(PHINode *Ptr) const {
3318  return NodeMap.count(Ptr);
3319  }
3320 
3321 private:
3322  /// Updates the CurrentIndex so that it will point to a valid element.
3323  ///
3324  /// If the element of NodeList at CurrentIndex is valid, it does not
3325  /// change it. If there are no more valid elements, it updates CurrentIndex
3326  /// to point to the end of the NodeList.
3327  void SkipRemovedElements(size_t &CurrentIndex) {
3328  while (CurrentIndex < NodeList.size()) {
3329  auto it = NodeMap.find(NodeList[CurrentIndex]);
3330  // If the element has been deleted and added again later, NodeMap will
3331  // point to a different index, so CurrentIndex will still be invalid.
3332  if (it != NodeMap.end() && it->second == CurrentIndex)
3333  break;
3334  ++CurrentIndex;
3335  }
3336  }
3337 };
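// Illustrative sketch of the PhiNodeSet semantics above (not from the original
// file; process() is a hypothetical callback). erase() only drops the NodeMap
// entry, so iteration stays in insertion order and skips removed elements
// lazily:
//
//   PhiNodeSet Set;
//   Set.insert(P1); Set.insert(P2); Set.insert(P3);
//   Set.erase(P2);              // O(1): P2 stays in NodeList, leaves NodeMap
//   for (PHINode *P : Set)      // visits P1, then P3, in insertion order
//     process(P);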
3338 
3339 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3340  : Set(Set), CurrentIndex(Start) {}
3341 
3342 PHINode * PhiNodeSetIterator::operator*() const {
3343  assert(CurrentIndex < Set->NodeList.size() &&
3344  "PhiNodeSet access out of range");
3345  return Set->NodeList[CurrentIndex];
3346 }
3347 
3348 PhiNodeSetIterator& PhiNodeSetIterator::operator++() {
3349  assert(CurrentIndex < Set->NodeList.size() &&
3350  "PhiNodeSet access out of range");
3351  ++CurrentIndex;
3352  Set->SkipRemovedElements(CurrentIndex);
3353  return *this;
3354 }
3355 
3356 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3357  return CurrentIndex == RHS.CurrentIndex;
3358 }
3359 
3360 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3361  return !((*this) == RHS);
3362 }
3363 
3364 /// Keeps track of the simplification of Phi nodes.
3365 /// Accepts the set of all phi nodes and erases a phi node from this set
3366 /// once it is simplified.
3367 class SimplificationTracker {
3368  DenseMap<Value *, Value *> Storage;
3369  const SimplifyQuery &SQ;
3370  // Tracks newly created Phi nodes. The elements are iterated in insertion
3371  // order.
3372  PhiNodeSet AllPhiNodes;
3373  // Tracks newly created Select nodes.
3374  SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3375 
3376 public:
3377  SimplificationTracker(const SimplifyQuery &sq)
3378  : SQ(sq) {}
3379 
3380  Value *Get(Value *V) {
3381  do {
3382  auto SV = Storage.find(V);
3383  if (SV == Storage.end())
3384  return V;
3385  V = SV->second;
3386  } while (true);
3387  }
3388 
3389  Value *Simplify(Value *Val) {
3390  SmallVector<Value *, 32> WorkList;
3391  SmallPtrSet<Value *, 32> Visited;
3392  WorkList.push_back(Val);
3393  while (!WorkList.empty()) {
3394  auto *P = WorkList.pop_back_val();
3395  if (!Visited.insert(P).second)
3396  continue;
3397  if (auto *PI = dyn_cast<Instruction>(P))
3398  if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3399  for (auto *U : PI->users())
3400  WorkList.push_back(cast<Value>(U));
3401  Put(PI, V);
3402  PI->replaceAllUsesWith(V);
3403  if (auto *PHI = dyn_cast<PHINode>(PI))
3404  AllPhiNodes.erase(PHI);
3405  if (auto *Select = dyn_cast<SelectInst>(PI))
3406  AllSelectNodes.erase(Select);
3407  PI->eraseFromParent();
3408  }
3409  }
3410  return Get(Val);
3411  }
3412 
3413  void Put(Value *From, Value *To) {
3414  Storage.insert({ From, To });
3415  }
3416 
3417  void ReplacePhi(PHINode *From, PHINode *To) {
3418  Value* OldReplacement = Get(From);
3419  while (OldReplacement != From) {
3420  From = To;
3421  To = dyn_cast<PHINode>(OldReplacement);
3422  OldReplacement = Get(From);
3423  }
3424  assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3425  Put(From, To);
3426  From->replaceAllUsesWith(To);
3427  AllPhiNodes.erase(From);
3428  From->eraseFromParent();
3429  }
3430 
3431  PhiNodeSet& newPhiNodes() { return AllPhiNodes; }
3432 
3433  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3434 
3435  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3436 
3437  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3438 
3439  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3440 
3441  void destroyNewNodes(Type *CommonType) {
3442  // For safe erasing, replace the uses with dummy value first.
3443  auto *Dummy = UndefValue::get(CommonType);
3444  for (auto *I : AllPhiNodes) {
3445  I->replaceAllUsesWith(Dummy);
3446  I->eraseFromParent();
3447  }
3448  AllPhiNodes.clear();
3449  for (auto *I : AllSelectNodes) {
3450  I->replaceAllUsesWith(Dummy);
3451  I->eraseFromParent();
3452  }
3453  AllSelectNodes.clear();
3454  }
3455 };
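// Illustrative sketch of how Put/Get compose (assumed usage, not from the
// original file): Put records a replacement edge and Get follows the chain to
// the latest value, so transitive simplifications resolve to one representative.
//
//   ST.Put(A, B);           // A was simplified to B
//   ST.Put(B, C);           // later, B itself was simplified to C
//   Value *V = ST.Get(A);   // follows A -> B -> C and returns C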
3456 
3457 /// A helper class for combining addressing modes.
3458 class AddressingModeCombiner {
3459  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3460  typedef std::pair<PHINode *, PHINode *> PHIPair;
3461 
3462 private:
3463  /// The addressing modes we've collected.
3464  SmallVector<ExtAddrMode, 16> AddrModes;
3465 
3466  /// The field in which the AddrModes differ, when we have more than one.
3467  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3468 
3469  /// Are the AddrModes that we have all just equal to their original values?
3470  bool AllAddrModesTrivial = true;
3471 
3472  /// Common Type for all different fields in addressing modes.
3473  Type *CommonType = nullptr;
3474 
3475  /// SimplifyQuery for simplifyInstruction utility.
3476  const SimplifyQuery &SQ;
3477 
3478  /// Original Address.
3479  Value *Original;
3480 
3481 public:
3482  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3483  : SQ(_SQ), Original(OriginalValue) {}
3484 
3485  /// Get the combined AddrMode
3486  const ExtAddrMode &getAddrMode() const {
3487  return AddrModes[0];
3488  }
3489 
3490  /// Add a new AddrMode if it's compatible with the AddrModes we already
3491  /// have.
3492  /// \return True iff we succeeded in doing so.
3493  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3494  // Note whether we have any non-trivial AddrModes, as we need to detect
3495  // when all AddrModes are trivial, because then we would introduce a phi or
3496  // select which just duplicates what's already there.
3497  AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3498 
3499  // If this is the first addrmode then everything is fine.
3500  if (AddrModes.empty()) {
3501  AddrModes.emplace_back(NewAddrMode);
3502  return true;
3503  }
3504 
3505  // Figure out how different this is from the other address modes, which we
3506  // can do just by comparing against the first one given that we only care
3507  // about the cumulative difference.
3508  ExtAddrMode::FieldName ThisDifferentField =
3509  AddrModes[0].compare(NewAddrMode);
3510  if (DifferentField == ExtAddrMode::NoField)
3511  DifferentField = ThisDifferentField;
3512  else if (DifferentField != ThisDifferentField)
3513  DifferentField = ExtAddrMode::MultipleFields;
3514 
3515  // If NewAddrMode differs in more than one dimension we cannot handle it.
3516  bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3517 
3518  // If Scale Field is different then we reject.
3519  CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3520 
3521  // We must also reject the case when the base offsets differ and a scaled
3522  // register is present: we cannot handle it, because the merge of the
3523  // different offsets would have to be used as the ScaleReg.
3524  CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3525  !NewAddrMode.ScaledReg);
3526 
3527  // We must also reject the case when the GVs differ and a BaseReg is
3528  // installed, because we want to use the base register as a merge of the GV values.
3529  CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3530  !NewAddrMode.HasBaseReg);
3531 
3532  // Even if NewAddrMode is the same, we still need to collect it because the
3533  // original value is different. Later we will need all the original values
3534  // as anchors when finding the common Phi node.
3535  if (CanHandle)
3536  AddrModes.emplace_back(NewAddrMode);
3537  else
3538  AddrModes.clear();
3539 
3540  return CanHandle;
3541  }
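// Worked example of the acceptance rules above (illustrative values only):
// modes that differ in a single mergeable field are kept; anything else drops
// the whole collection.
//
//   AM1 = {BaseReg = %p1, BaseOffs = 40}
//   AM2 = {BaseReg = %p2, BaseOffs = 40}  // accepted: only BaseRegField differs
//   AM3 = {BaseReg = %p1, Scale = 2}      // rejected: ScaleField differs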
3542 
3543  /// Combine the addressing modes we've collected into a single
3544  /// addressing mode.
3545  /// \return True iff we successfully combined them or we only had one so
3546  /// didn't need to combine them anyway.
3547  bool combineAddrModes() {
3548  // If we have no AddrModes then they can't be combined.
3549  if (AddrModes.size() == 0)
3550  return false;
3551 
3552  // A single AddrMode can trivially be combined.
3553  if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3554  return true;
3555 
3556  // If the AddrModes we collected are all just equal to the value they are
3557  // derived from then combining them wouldn't do anything useful.
3558  if (AllAddrModesTrivial)
3559  return false;
3560 
3561  if (!addrModeCombiningAllowed())
3562  return false;
3563 
3564  // Build a map from each original address value to the value of the
3565  // base register.
3566  // Bail out if there is no common type.
3567  FoldAddrToValueMapping Map;
3568  if (!initializeMap(Map))
3569  return false;
3570 
3571  Value *CommonValue = findCommon(Map);
3572  if (CommonValue)
3573  AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3574  return CommonValue != nullptr;
3575  }
3576 
3577 private:
3578  /// Initialize Map with anchor values. For each address seen,
3579  /// we record the value of the differing field in that address.
3580  /// At the same time we find a common type for the differing field, which we
3581  /// will use to create new Phi/Select nodes. It is kept in the CommonType field.
3582  /// Return false if no common type is found.
3583  bool initializeMap(FoldAddrToValueMapping &Map) {
3584  // Keep track of keys where the value is null. We will need to replace it
3585  // with constant null when we know the common type.
3586  SmallVector<Value *, 2> NullValue;
3587  Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3588  for (auto &AM : AddrModes) {
3589  Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3590  if (DV) {
3591  auto *Type = DV->getType();
3592  if (CommonType && CommonType != Type)
3593  return false;
3594  CommonType = Type;
3595  Map[AM.OriginalValue] = DV;
3596  } else {
3597  NullValue.push_back(AM.OriginalValue);
3598  }
3599  }
3600  assert(CommonType && "At least one non-null value must be!");
3601  for (auto *V : NullValue)
3602  Map[V] = Constant::getNullValue(CommonType);
3603  return true;
3604  }
3605 
3606  /// We have a mapping from value A to value B, where B was a field in the
3607  /// addressing mode represented by A. We also have an original value C
3608  /// representing the address we start with. Traversing from C through phis and
3609  /// selects, we ended up with the A's in the map. This utility function tries
3610  /// to find a value V which is a field in addressing mode C such that, by
3611  /// traversing through phi nodes and selects, we end up at the corresponding
3612  /// values B in the map. The utility creates new Phi/Select nodes if needed.
3613  // The simple example looks as follows:
3614  // BB1:
3615  // p1 = b1 + 40
3616  // br cond BB2, BB3
3617  // BB2:
3618  // p2 = b2 + 40
3619  // br BB3
3620  // BB3:
3621  // p = phi [p1, BB1], [p2, BB2]
3622  // v = load p
3623  // Map is
3624  // p1 -> b1
3625  // p2 -> b2
3626  // Request is
3627  // p -> ?
3628  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3629  Value *findCommon(FoldAddrToValueMapping &Map) {
3630  // Tracks the simplification of newly created phi nodes. The reason we use
3631  // this mapping is that we will add newly created Phi nodes to AddrToBase.
3632  // Simplification of Phi nodes is recursive, so some Phi node may
3633  // be simplified after we add it to AddrToBase. In practice this
3634  // simplification is possible only if the original phis/selects were not
3635  // simplified yet.
3636  // Using this mapping we can find the current value in AddrToBase.
3637  SimplificationTracker ST(SQ);
3638 
3639  // First step, DFS to create PHI nodes for all intermediate blocks.
3640  // Also fill traverse order for the second step.
3641  SmallVector<Value *, 32> TraverseOrder;
3642  InsertPlaceholders(Map, TraverseOrder, ST);
3643 
3644  // Second Step, fill new nodes by merged values and simplify if possible.
3645  FillPlaceholders(Map, TraverseOrder, ST);
3646 
3647  if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3648  ST.destroyNewNodes(CommonType);
3649  return nullptr;
3650  }
3651 
3652  // Now we'd like to match the new Phi nodes to existing ones.
3653  unsigned PhiNotMatchedCount = 0;
3654  if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3655  ST.destroyNewNodes(CommonType);
3656  return nullptr;
3657  }
3658 
3659  auto *Result = ST.Get(Map.find(Original)->second);
3660  if (Result) {
3661  NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3662  NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3663  }
3664  return Result;
3665  }
3666 
3667  /// Try to match PHI node to Candidate.
3668  /// Matcher tracks the matched Phi nodes.
3669  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3670  SmallSetVector<PHIPair, 8> &Matcher,
3671  PhiNodeSet &PhiNodesToMatch) {
3672  SmallVector<PHIPair, 8> WorkList;
3673  Matcher.insert({ PHI, Candidate });
3674  SmallSet<PHINode *, 8> MatchedPHIs;
3675  MatchedPHIs.insert(PHI);
3676  WorkList.push_back({ PHI, Candidate });
3677  SmallSet<PHIPair, 8> Visited;
3678  while (!WorkList.empty()) {
3679  auto Item = WorkList.pop_back_val();
3680  if (!Visited.insert(Item).second)
3681  continue;
3682  // We iterate over all incoming values of the Phi to compare them.
3683  // If the values are different, both of them are Phis, the first one is a
3684  // Phi we added (subject to matching), and both are in the same basic
3685  // block, then we can match the pair if their values match. So we state
3686  // that these values match and add the pair to the worklist to verify it.
3687  for (auto B : Item.first->blocks()) {
3688  Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3689  Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3690  if (FirstValue == SecondValue)
3691  continue;
3692 
3693  PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3694  PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3695 
3696  // If one of them is not a Phi, or
3697  // the first one is not a Phi node from the set we'd like to match, or
3698  // the Phi nodes come from different basic blocks, then
3699  // we will not be able to match.
3700  if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3701  FirstPhi->getParent() != SecondPhi->getParent())
3702  return false;
3703 
3704  // If we already matched them then continue.
3705  if (Matcher.count({ FirstPhi, SecondPhi }))
3706  continue;
3707  // So the values are different and do not match. So we need them to
3708  // match. (But we register no more than one match per PHI node, so that
3709  // we won't later try to replace them twice.)
3710  if (MatchedPHIs.insert(FirstPhi).second)
3711  Matcher.insert({ FirstPhi, SecondPhi });
3712  // But we must check it.
3713  WorkList.push_back({ FirstPhi, SecondPhi });
3714  }
3715  }
3716  return true;
3717  }
3718 
3719  /// For the given set of PHI nodes (in the SimplificationTracker) try
3720  /// to find their equivalents.
3721  /// Returns false if this matching fails and creation of new Phi is disabled.
3722  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3723  unsigned &PhiNotMatchedCount) {
3724  // Matched and PhiNodesToMatch iterate their elements in a deterministic
3725  // order, so the replacements (ReplacePhi) are also done in a deterministic
3726  // order.
3727  SmallSetVector<PHIPair, 8> Matched;
3728  SmallPtrSet<PHINode *, 8> WillNotMatch;
3729  PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3730  while (PhiNodesToMatch.size()) {
3731  PHINode *PHI = *PhiNodesToMatch.begin();
3732 
3733  // Add this PHI itself; if no Phi node in the basic block matches, we do not match.
3734  WillNotMatch.clear();
3735  WillNotMatch.insert(PHI);
3736 
3737  // Traverse all Phis until we find an equivalent or fail to do so.
3738  bool IsMatched = false;
3739  for (auto &P : PHI->getParent()->phis()) {
3740  // Skip new Phi nodes.
3741  if (PhiNodesToMatch.count(&P))
3742  continue;
3743  if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
3744  break;
3745  // If it does not match, collect all the Phi nodes from the matcher.
3746  // If we end up with no match, then all these Phi nodes will not match
3747  // later.
3748  for (auto M : Matched)
3749  WillNotMatch.insert(M.first);
3750  Matched.clear();
3751  }
3752  if (IsMatched) {
3753  // Replace all matched values and erase them.
3754  for (auto MV : Matched)
3755  ST.ReplacePhi(MV.first, MV.second);
3756  Matched.clear();
3757  continue;
3758  }
3759  // If we are not allowed to create new nodes then bail out.
3760  if (!AllowNewPhiNodes)
3761  return false;
3762  // Just remove all seen values in matcher. They will not match anything.
3763  PhiNotMatchedCount += WillNotMatch.size();
3764  for (auto *P : WillNotMatch)
3765  PhiNodesToMatch.erase(P);
3766  }
3767  return true;
3768  }
3769  /// Fill the placeholders with values from predecessors and simplify them.
3770  void FillPlaceholders(FoldAddrToValueMapping &Map,
3771  SmallVectorImpl<Value *> &TraverseOrder,
3772  SimplificationTracker &ST) {
3773  while (!TraverseOrder.empty()) {
3774  Value *Current = TraverseOrder.pop_back_val();
3775  assert(Map.find(Current) != Map.end() && "No node to fill!!!");
3776  Value *V = Map[Current];
3777 
3778  if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
3779  // CurrentValue also must be Select.
3780  auto *CurrentSelect = cast<SelectInst>(Current);
3781  auto *TrueValue = CurrentSelect->getTrueValue();
3782  assert(Map.find(TrueValue) != Map.end() && "No True Value!");
3783  Select->setTrueValue(ST.Get(Map[TrueValue]));
3784  auto *FalseValue = CurrentSelect->getFalseValue();
3785  assert(Map.find(FalseValue) != Map.end() && "No False Value!");
3786  Select->setFalseValue(ST.Get(Map[FalseValue]));
3787  } else {
3788  // Must be a Phi node then.
3789  auto *PHI = cast<PHINode>(V);
3790  // Fill the Phi node with values from predecessors.
3791  for (auto *B : predecessors(PHI->getParent())) {
3792  Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
3793  assert(Map.find(PV) != Map.end() && "No predecessor Value!");
3794  PHI->addIncoming(ST.Get(Map[PV]), B);
3795  }
3796  }
3797  Map[Current] = ST.Simplify(V);
3798  }
3799  }
3800 
3801  /// Starting from the original value, recursively iterates over the def-use
3802  /// chain up to the known ending values represented in the map. For each
3803  /// traversed phi/select it inserts a placeholder Phi or Select.
3804  /// Reports all newly created Phi/Select nodes by adding them to the set.
3805  /// Also records the order in which the values were traversed.
3806  void InsertPlaceholders(FoldAddrToValueMapping &Map,
3807  SmallVectorImpl<Value *> &TraverseOrder,
3808  SimplificationTracker &ST) {
3809  SmallVector<Value *, 32> Worklist;
3810  assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
3811  "Address must be a Phi or Select node");
3812  auto *Dummy = UndefValue::get(CommonType);
3813  Worklist.push_back(Original);
3814  while (!Worklist.empty()) {
3815  Value *Current = Worklist.pop_back_val();
3816  // if it is already visited or it is an ending value then skip it.
3817  if (Map.find(Current) != Map.end())
3818  continue;
3819  TraverseOrder.push_back(Current);
3820 
3821  // CurrentValue must be a Phi node or select. All others must be covered
3822  // by anchors.
3823  if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
3824  // Is it OK to get metadata from OrigSelect?!
3825  // Create a Select placeholder with dummy value.
3826  SelectInst *Select = SelectInst::Create(
3827  CurrentSelect->getCondition(), Dummy, Dummy,
3828  CurrentSelect->getName(), CurrentSelect, CurrentSelect);
3829  Map[Current] = Select;
3830  ST.insertNewSelect(Select);
3831  // We are interested in True and False values.
3832  Worklist.push_back(CurrentSelect->getTrueValue());
3833  Worklist.push_back(CurrentSelect->getFalseValue());
3834  } else {
3835  // It must be a Phi node then.
3836  PHINode *CurrentPhi = cast<PHINode>(Current);
3837  unsigned PredCount = CurrentPhi->getNumIncomingValues();
3838  PHINode *PHI =
3839  PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
3840  Map[Current] = PHI;
3841  ST.insertNewPhi(PHI);
3842  append_range(Worklist, CurrentPhi->incoming_values());
3843  }
3844  }
3845  }
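// Continuing the BB1/BB2/BB3 example given above for findCommon (illustrative
// only): InsertPlaceholders visits p and creates an empty "sunk_phi"
// placeholder in BB3; FillPlaceholders then adds the incoming values
// ST.Get(Map[p1]) = b1 from BB1 and ST.Get(Map[p2]) = b2 from BB2 and
// simplifies the result, yielding the requested phi [b1, BB1], [b2, BB2].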
3846 
3847  bool addrModeCombiningAllowed() {
3848  if (DisableComplexAddrModes)
3849  return false;
3850  switch (DifferentField) {
3851  default:
3852  return false;
3853  case ExtAddrMode::BaseRegField:
3854  return AddrSinkCombineBaseReg;
3855  case ExtAddrMode::BaseGVField:
3856  return AddrSinkCombineBaseGV;
3857  case ExtAddrMode::BaseOffsField:
3858  return AddrSinkCombineBaseOffs;
3859  case ExtAddrMode::ScaledRegField:
3860  return AddrSinkCombineScaledReg;
3861  }
3862  }
3863 };
3864 } // end anonymous namespace
3865 
3866 /// Try adding ScaleReg*Scale to the current addressing mode.
3867 /// Return true and update AddrMode if this addr mode is legal for the target,
3868 /// false if not.
3869 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3870  unsigned Depth) {
3871  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3872  // mode. Just process that directly.
3873  if (Scale == 1)
3874  return matchAddr(ScaleReg, Depth);
3875 
3876  // If the scale is 0, it takes nothing to add this.
3877  if (Scale == 0)
3878  return true;
3879 
3880  // If we already have a scale of this value, we can add to it, otherwise, we
3881  // need an available scale field.
3882  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3883  return false;
3884 
3885  ExtAddrMode TestAddrMode = AddrMode;
3886 
3887  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
3888  // [A+B + A*7] -> [B+A*8].
3889  TestAddrMode.Scale += Scale;
3890  TestAddrMode.ScaledReg = ScaleReg;
3891 
3892  // If the new address isn't legal, bail out.
3893  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3894  return false;
3895 
3896  // It was legal, so commit it.
3897  AddrMode = TestAddrMode;
3898 
3899  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
3900  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
3901  // X*Scale + C*Scale to the addr mode. If we find an available IV increment, do not
3902  // go any further: we can reuse it and cannot eliminate it.
3903  ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3904  if (isa<Instruction>(ScaleReg) && // not a constant expr.
3905  match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
3906  !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
3907  TestAddrMode.InBounds = false;
3908  TestAddrMode.ScaledReg = AddLHS;
3909  TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
3910 
3911  // If this addressing mode is legal, commit it and remember that we folded
3912  // this instruction.
3913  if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3914  AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3915  AddrMode = TestAddrMode;
3916  return true;
3917  }
3918  // Restore status quo.
3919  TestAddrMode = AddrMode;
3920  }
3921 
3922  // If this is an add recurrence with a constant step, return the increment
3923  // instruction and the canonicalized step.
3924  auto GetConstantStep = [this](const Value * V)
3925  ->Optional<std::pair<Instruction *, APInt> > {
3926  auto *PN = dyn_cast<PHINode>(V);
3927  if (!PN)
3928  return None;
3929  auto IVInc = getIVIncrement(PN, &LI);
3930  if (!IVInc)
3931  return None;
3932  // TODO: The result of the intrinsics above is two's complement. However, when
3933  // the IV increment is expressed as add or sub, iv.next is potentially a poison
3934  // value. If it has nuw or nsw flags, we need to make sure that these flags are
3935  // inferable at the point of the memory instruction. Otherwise we would be
3936  // replacing a well-defined two's complement computation with poison. To avoid
3937  // the potentially complex analysis needed to prove this, we reject such cases.
3938  if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
3939  if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
3940  return None;
3941  if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
3942  return std::make_pair(IVInc->first, ConstantStep->getValue());
3943  return None;
3944  };
3945 
3946  // Try to account for the following special case:
3947  // 1. ScaleReg is an induction variable;
3948  // 2. We use it with a non-zero offset;
3949  // 3. The IV's increment is available at the point of the memory instruction.
3950  //
3951  // In this case, we may reuse the IV increment instead of the IV Phi to
3952  // achieve the following advantages:
3953  // 1. If the IV step matches the offset, we will have no need for the offset;
3954  // 2. Even if they don't match, we will reduce the overlap of the live ranges
3955  // of the IV and the IV increment, which can potentially lead to better
3956  // register allocation.
3957  if (AddrMode.BaseOffs) {
3958  if (auto IVStep = GetConstantStep(ScaleReg)) {
3959  Instruction *IVInc = IVStep->first;
3960  // The following assert is important to ensure a lack of infinite loops.
3961  // This transform is (intentionally) the inverse of the one just above.
3962  // If they don't agree on the definition of an increment, we'd alternate
3963  // back and forth indefinitely.
3964  assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
3965  APInt Step = IVStep->second;
3966  APInt Offset = Step * AddrMode.Scale;
3967  if (Offset.isSignedIntN(64)) {
3968  TestAddrMode.InBounds = false;
3969  TestAddrMode.ScaledReg = IVInc;
3970  TestAddrMode.BaseOffs -= Offset.getLimitedValue();
3971  // If this addressing mode is legal, commit it.
3972  // (Note that we defer the (expensive) domtree base legality check
3973  // to the very last possible point.)
3974  if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
3975  getDTFn().dominates(IVInc, MemoryInst)) {
3976  AddrModeInsts.push_back(cast<Instruction>(IVInc));
3977  AddrMode = TestAddrMode;
3978  return true;
3979  }
3980  // Restore status quo.
3981  TestAddrMode = AddrMode;
3982  }
3983  }
3984  }
3985 
3986  // Otherwise, just return what we have.
3987  return true;
3988 }
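// Illustrative IR example of the ScaleReg = X + C folding above (hand-written,
// not taken from a test):
//
//   %idx  = add i64 %x, 3
//   %addr = getelementptr i32, i32* %base, i64 %idx   ; matched with Scale = 4
//
// can be folded as [%base + %x*4 + 12] when the target accepts the resulting
// offset, letting the add participate in the addressing mode.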
3989 
3990 /// This is a little filter, which returns true if an addressing computation
3991 /// involving I might be folded into a load/store accessing it.
3992 /// This doesn't need to be perfect, but needs to accept at least
3993 /// the set of instructions that MatchOperationAddr can.
3994 static bool MightBeFoldableInst(Instruction *I) {
3995  switch (I->getOpcode()) {
3996  case Instruction::BitCast:
3997  case Instruction::AddrSpaceCast:
3998  // Don't touch identity bitcasts.
3999  if (I->getType() == I->getOperand(0)->getType())
4000  return false;
4001  return I->getType()->isIntOrPtrTy();
4002  case Instruction::PtrToInt:
4003  // PtrToInt is always a noop, as we know that the int type is pointer sized.
4004  return true;
4005  case Instruction::IntToPtr:
4006  // We know the input is intptr_t, so this is foldable.
4007  return true;
4008  case Instruction::Add:
4009  return true;
4010  case Instruction::Mul:
4011  case Instruction::Shl:
4012  // Can only handle X*C and X << C.
4013  return isa<ConstantInt>(I->getOperand(1));
4014  case Instruction::GetElementPtr:
4015  return true;
4016  default:
4017  return false;
4018  }
4019 }
4020 
4021 /// Check whether or not \p Val is a legal instruction for \p TLI.
4022 /// \note \p Val is assumed to be the product of some type promotion.
4023 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4024 /// to be legal, as the non-promoted value would have had the same state.
4025 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4026  const DataLayout &DL, Value *Val) {
4027  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4028  if (!PromotedInst)
4029  return false;
4030  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4031  // If the ISDOpcode is undefined, it was undefined before the promotion.
4032  if (!ISDOpcode)
4033  return true;
4034  // Otherwise, check if the promoted instruction is legal or not.
4035  return TLI.isOperationLegalOrCustom(
4036  ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4037 }
4038 
4039 namespace {
4040 
4041 /// Helper class to perform type promotion.
4042 class TypePromotionHelper {
4043  /// Utility function to add a promoted instruction \p ExtOpnd to
4044  /// \p PromotedInsts and record the type of extension we have seen.
4045  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4046  Instruction *ExtOpnd,
4047  bool IsSExt) {
4048  ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4049  InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4050  if (It != PromotedInsts.end()) {
4051  // If the new extension is same as original, the information in
4052  // PromotedInsts[ExtOpnd] is still correct.
4053  if (It->second.getInt() == ExtTy)
4054  return;
4055 
4056  // Now the new extension is different from old extension, we make
4057  // the type information invalid by setting extension type to
4058  // BothExtension.
4059  ExtTy = BothExtension;
4060  }
4061  PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4062  }
4063 
4064  /// Utility function to query the original type of instruction \p Opnd
4065  /// with a matched extension type. If the extension doesn't match, we
4066  /// cannot use the information we had on the original type.
4067  /// BothExtension doesn't match any extension type.
4068  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4069  Instruction *Opnd,
4070  bool IsSExt) {
4071  ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4072  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4073  if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4074  return It->second.getPointer();
4075  return nullptr;
4076  }
4077 
4078  /// Utility function to check whether or not a sign or zero extension
4079  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4080  /// either using the operands of \p Inst or promoting \p Inst.
4081  /// The type of the extension is defined by \p IsSExt.
4082  /// In other words, check if:
4083  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4084  /// #1 Promotion applies:
4085  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4086  /// #2 Operand reuses:
4087  /// ext opnd1 to ConsideredExtType.
4088  /// \p PromotedInsts maps the instructions to their type before promotion.
4089  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4090  const InstrToOrigTy &PromotedInsts, bool IsSExt);
4091 
4092  /// Utility function to determine if \p OpIdx should be promoted when
4093  /// promoting \p Inst.
4094  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4095  return !(isa<SelectInst>(Inst) && OpIdx == 0);
4096  }
4097 
4098  /// Utility function to promote the operand of \p Ext when this
4099  /// operand is a promotable trunc or sext or zext.
4100  /// \p PromotedInsts maps the instructions to their type before promotion.
4101  /// \p CreatedInstsCost[out] contains the cost of all instructions
4102  /// created to promote the operand of Ext.
4103  /// Newly added extensions are inserted in \p Exts.
4104  /// Newly added truncates are inserted in \p Truncs.
4105  /// Should never be called directly.
4106  /// \return The promoted value which is used instead of Ext.
4107  static Value *promoteOperandForTruncAndAnyExt(
4108  Instruction *Ext, TypePromotionTransaction &TPT,
4109  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4110  SmallVectorImpl<Instruction *> *Exts,
4111  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4112 
4113  /// Utility function to promote the operand of \p Ext when this
4114  /// operand is promotable and is not a supported trunc or sext.
4115  /// \p PromotedInsts maps the instructions to their type before promotion.
4116  /// \p CreatedInstsCost[out] contains the cost of all the instructions
4117  /// created to promote the operand of Ext.
4118  /// Newly added extensions are inserted in \p Exts.
4119  /// Newly added truncates are inserted in \p Truncs.
4120  /// Should never be called directly.
4121  /// \return The promoted value which is used instead of Ext.
4122  static Value *promoteOperandForOther(Instruction *Ext,
4123  TypePromotionTransaction &TPT,
4124  InstrToOrigTy &PromotedInsts,
4125  unsigned &CreatedInstsCost,
4126  SmallVectorImpl<Instruction *> *Exts,
4127  SmallVectorImpl<Instruction *> *Truncs,
4128  const TargetLowering &TLI, bool IsSExt);
4129 
4130  /// \see promoteOperandForOther.
4131  static Value *signExtendOperandForOther(
4132  Instruction *Ext, TypePromotionTransaction &TPT,
4133  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4134  SmallVectorImpl<Instruction *> *Exts,
4135  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4136  return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4137  Exts, Truncs, TLI, true);
4138  }
4139 
4140  /// \see promoteOperandForOther.
4141  static Value *zeroExtendOperandForOther(
4142  Instruction *Ext, TypePromotionTransaction &TPT,
4143  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4144  SmallVectorImpl<Instruction *> *Exts,
4145  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4146  return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4147  Exts, Truncs, TLI, false);
4148  }
4149 
4150 public:
4151  /// Type for the utility function that promotes the operand of Ext.
4152  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4153  InstrToOrigTy &PromotedInsts,
4154  unsigned &CreatedInstsCost,
4155  SmallVectorImpl<Instruction *> *Exts,
4156  SmallVectorImpl<Instruction *> *Truncs,
4157  const TargetLowering &TLI);
4158 
4159  /// Given a sign/zero extend instruction \p Ext, return the appropriate
4160  /// action to promote the operand of \p Ext instead of using Ext.
4161  /// \return NULL if no promotable action is possible with the current
4162  /// sign extension.
4163  /// \p InsertedInsts keeps track of all the instructions inserted by the
4164  /// other CodeGenPrepare optimizations. This information is important
4165  /// because we do not want to promote these instructions as CodeGenPrepare
4166  /// will reinsert them later. Thus creating an infinite loop: create/remove.
4167  /// will reinsert them later, thus creating an infinite loop: create/remove.
4168  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4169  const TargetLowering &TLI,
4170  const InstrToOrigTy &PromotedInsts);
4171 };
4172 
4173 } // end anonymous namespace
4174 
4175 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4176  Type *ConsideredExtType,
4177  const InstrToOrigTy &PromotedInsts,
4178  bool IsSExt) {
4179  // The promotion helper does not know how to deal with vector types yet.
4180  // To be able to fix that, we would need to fix the places where we
4181  // statically extend, e.g., constants and such.
4182  if (Inst->getType()->isVectorTy())
4183  return false;
4184 
4185  // We can always get through zext.
4186  if (isa<ZExtInst>(Inst))
4187  return true;
4188 
4189  // sext(sext) is ok too.
4190  if (IsSExt && isa<SExtInst>(Inst))
4191  return true;
4192 
4193  // We can get through binary operator, if it is legal. In other words, the
4194  // binary operator must have a nuw or nsw flag.
4195  if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4196  if (isa<OverflowingBinaryOperator>(BinOp) &&
4197  ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4198  (IsSExt && BinOp->hasNoSignedWrap())))
4199  return true;
4200 
4201  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4202  if ((Inst->getOpcode() == Instruction::And ||
4203  Inst->getOpcode() == Instruction::Or))
4204  return true;
4205 
4206  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4207  if (Inst->getOpcode() == Instruction::Xor) {
4208  // Make sure it is not a NOT.
4209  if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4210  if (!Cst->getValue().isAllOnes())
4211  return true;
4212  }
4213 
4214  // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4215  // It may change a poisoned value into a regular value, like
4216  // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12
4217  // poisoned value regular value
4218  // It should be OK since undef covers valid value.
4219  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4220  return true;
4221 
4222  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4223  // It may change a poisoned value into a regular value, like
4224  // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
4225  // poisoned value regular value
4226  // It should be OK since undef covers valid value.
4227  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4228  const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4229  if (ExtInst->hasOneUse()) {
4230  const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4231  if (AndInst && AndInst->getOpcode() == Instruction::And) {
4232  const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4233  if (Cst &&
4234  Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4235  return true;
4236  }
4237  }
4238  }
4239 
4240  // Check if we can do the following simplification.
4241  // ext(trunc(opnd)) --> ext(opnd)
4242  if (!isa<TruncInst>(Inst))
4243  return false;
4244 
4245  Value *OpndVal = Inst->getOperand(0);
4246  // Check if we can use this operand in the extension.
4247  // If the type is larger than the result type of the extension, we cannot.
4248  if (!OpndVal->getType()->isIntegerTy() ||
4249  OpndVal->getType()->getIntegerBitWidth() >
4250  ConsideredExtType->getIntegerBitWidth())
4251  return false;
4252 
4253  // If the operand of the truncate is not an instruction, we will not have
4254  // any information on the dropped bits.
4255  // (Actually we could for constant but it is not worth the extra logic).
4256  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4257  if (!Opnd)
4258  return false;
4259 
4260  // Check if the source of the type is narrow enough.
4261  // I.e., check that trunc just drops extended bits of the same kind of
4262  // the extension.
4263  // #1 get the type of the operand and check the kind of the extended bits.
4264  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4265  if (OpndType)
4266  ;
4267  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4268  OpndType = Opnd->getOperand(0)->getType();
4269  else
4270  return false;
4271 
4272  // #2 check that the truncate just drops extended bits.
4273  return Inst->getType()->getIntegerBitWidth() >=
4274  OpndType->getIntegerBitWidth();
4275 }
4276 
4277 TypePromotionHelper::Action TypePromotionHelper::getAction(
4278  Instruction *Ext, const SetOfInstrs &InsertedInsts,
4279  const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4280  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4281  "Unexpected instruction type");
4282  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4283  Type *ExtTy = Ext->getType();
4284  bool IsSExt = isa<SExtInst>(Ext);
4285  // If the operand of the extension is not an instruction, we cannot
4286  // get through.
4287  // If it is, check whether we can get through it.
4288  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4289  return nullptr;
4290 
4291  // Do not promote if the operand has been added by codegenprepare.
4292  // Otherwise, it means we are undoing an optimization that is likely to be
4293  // redone, thus causing a potential infinite loop.
4294  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4295  return nullptr;
4296 
4297  // SExt or Trunc instructions.
4298  // Return the related handler.
4299  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4300  isa<ZExtInst>(ExtOpnd))
4301  return promoteOperandForTruncAndAnyExt;
4302 
4303  // Regular instruction.
4304  // Abort early if we will have to insert non-free instructions.
4305  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4306  return nullptr;
4307  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4308 }
4309 
4310 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4311  Instruction *SExt, TypePromotionTransaction &TPT,
4312  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4313  SmallVectorImpl<Instruction *> *Exts,
4314  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4315  // By construction, the operand of SExt is an instruction. Otherwise we cannot
4316  // get through it and this method should not be called.
4317  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4318  Value *ExtVal = SExt;
4319  bool HasMergedNonFreeExt = false;
4320  if (isa<ZExtInst>(SExtOpnd)) {
4321  // Replace s|zext(zext(opnd))
4322  // => zext(opnd).
4323  HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4324  Value *ZExt =
4325  TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4326  TPT.replaceAllUsesWith(SExt, ZExt);
4327  TPT.eraseInstruction(SExt);
4328  ExtVal = ZExt;
4329  } else {
4330  // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4331  // => z|sext(opnd).
4332  TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4333  }
4334  CreatedInstsCost = 0;
4335 
4336  // Remove dead code.
4337  if (SExtOpnd->use_empty())
4338  TPT.eraseInstruction(SExtOpnd);
4339 
4340  // Check if the extension is still needed.
4341  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4342  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4343  if (ExtInst) {
4344  if (Exts)
4345  Exts->push_back(ExtInst);
4346  CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4347  }
4348  return ExtVal;
4349  }
4350 
4351  // At this point we have: ext ty opnd to ty.
4352  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4353  Value *NextVal = ExtInst->getOperand(0);
4354  TPT.eraseInstruction(ExtInst, NextVal);
4355  return NextVal;
4356 }
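// Illustrative IR rewrite performed by the ZExt branch above (hand-written
// example, not from a test):
//
//   %z = zext i8 %v to i16       ; SExtOpnd
//   %s = sext i16 %z to i32      ; SExt
//
// becomes a single  %z2 = zext i8 %v to i32, since the high bits of %z are
// known zero. The trunc/sext cases similarly collapse to one extension of the
// inner operand when the types allow it.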
4357 
4358 Value *TypePromotionHelper::promoteOperandForOther(
4359  Instruction *Ext, TypePromotionTransaction &TPT,
4360  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4361  SmallVectorImpl<Instruction *> *Exts,
4362  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4363  bool IsSExt) {
4364  // By construction, the operand of Ext is an instruction. Otherwise we cannot
4365  // get through it and this method should not be called.
4366  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4367  CreatedInstsCost = 0;
4368  if (!ExtOpnd->hasOneUse()) {
4369  // ExtOpnd will be promoted.
4370  // All its uses, but Ext, will need to use a truncated value of the
4371  // promoted version.
4372  // Create the truncate now.
4373  Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4374  if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4375  // Insert it just after the definition.
4376  ITrunc->moveAfter(ExtOpnd);
4377  if (Truncs)
4378  Truncs->push_back(ITrunc);
4379  }
4380 
4381  TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4382  // Restore the operand of Ext (which has been replaced by the previous call
4383  // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4384  TPT.setOperand(Ext, 0, ExtOpnd);
4385  }
4386 
4387  // Get through the Instruction:
4388  // 1. Update its type.
4389  // 2. Replace the uses of Ext by Inst.
4390  // 3. Extend each operand that needs to be extended.
4391 
4392  // Remember the original type of the instruction before promotion.
4393  // This is useful to know that the high bits are sign extended bits.
4394  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4395  // Step #1.
4396  TPT.mutateType(ExtOpnd, Ext->getType());
4397  // Step #2.
4398  TPT.replaceAllUsesWith(Ext, ExtOpnd);
4399  // Step #3.
4400  Instruction *ExtForOpnd = Ext;
4401 
4402  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4403  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4404  ++OpIdx) {
4405  LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4406  if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4407  !shouldExtOperand(ExtOpnd, OpIdx)) {
4408  LLVM_DEBUG(dbgs() << "No need to propagate\n");
4409  continue;
4410  }
4411  // Check if we can statically extend the operand.
4412  Value *Opnd = ExtOpnd->getOperand(OpIdx);
4413  if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4414  LLVM_DEBUG(dbgs() << "Statically extend\n");
4415  unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4416  APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4417  : Cst->getValue().zext(BitWidth);
4418  TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4419  continue;
4420  }
4421  // UndefValues are typed, so we have to statically extend them.
4422  if (isa<UndefValue>(Opnd)) {
4423  LLVM_DEBUG(dbgs() << "Statically extend\n");
4424  TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4425  continue;
4426  }
4427 
4428  // Otherwise we have to explicitly sign extend the operand.
4429  // Check if Ext was reused to extend an operand.
4430  if (!ExtForOpnd) {
4431  // If yes, create a new one.
4432  LLVM_DEBUG(dbgs() << "More operands to ext\n");
4433  Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
4434  : TPT.createZExt(Ext, Opnd, Ext->getType());
4435  if (!isa<Instruction>(ValForExtOpnd)) {
4436  TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4437  continue;
4438  }
4439  ExtForOpnd = cast<Instruction>(ValForExtOpnd);
4440  }
4441  if (Exts)
4442  Exts->push_back(ExtForOpnd);
4443  TPT.setOperand(ExtForOpnd, 0, Opnd);
4444 
4445  // Move the sign extension before the insertion point.
4446  TPT.moveBefore(ExtForOpnd, ExtOpnd);
4447  TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
4448  CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
4449  // If more sext are required, new instructions will have to be created.
4450  ExtForOpnd = nullptr;
4451  }
4452  if (ExtForOpnd == Ext) {
4453  LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4454  TPT.eraseInstruction(Ext);
4455  }
4456  return ExtOpnd;
4457 }
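// Illustrative IR example of the promotion above (hand-written, conceptual):
//
//   %a = add nsw i32 %x, 7
//   %s = sext i32 %a to i64
//
// is rewritten so that the add is performed in the wider type:
//
//   %xs  = sext i32 %x to i64
//   %a64 = add nsw i64 %xs, 7
//
// with a trunc back to i32 inserted for any other users of the original %a.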
4458 
4459 /// Check whether or not promoting an instruction to a wider type is profitable.
4460 /// \p NewCost gives the cost of extension instructions created by the
4461 /// promotion.
4462 /// \p OldCost gives the cost of extension instructions before the promotion
4463 /// plus the number of instructions that have been
4464  /// matched into the addressing mode by the promotion.
4465 /// \p PromotedOperand is the value that has been promoted.
4466 /// \return True if the promotion is profitable, false otherwise.
4467 bool AddressingModeMatcher::isPromotionProfitable(
4468  unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4469  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4470  << '\n');
4471  // The cost of the new extensions is greater than the cost of the
4472  // old extension plus what we folded.
4473  // This is not profitable.
4474  if (NewCost > OldCost)
4475  return false;
4476  if (NewCost < OldCost)
4477  return true;
4478  // The promotion is neutral but it may help folding the sign extension in
4479  // loads for instance.
4480  // Check that we did not create an illegal instruction.
4481  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4482 }
4483 
4484 /// Given an instruction or constant expr, see if we can fold the operation
4485 /// into the addressing mode. If so, update the addressing mode and return
4486 /// true, otherwise return false without modifying AddrMode.
4487 /// If \p MovedAway is not NULL, it is set to indicate whether or not
4488 /// AddrInst has to be folded into the addressing mode on success.
4489 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
4490 /// because it has been moved away.
4491 /// Thus AddrInst must not be added to the matched instructions.
4492 /// This state can happen when AddrInst is a sext, since it may be moved away.
4493 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4494 /// not be referenced anymore.
4495 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4496  unsigned Depth,
4497  bool *MovedAway) {
4498  // Avoid exponential behavior on extremely deep expression trees.
4499  if (Depth >= 5) return false;
4500 
4501  // By default, all matched instructions stay in place.
4502  if (MovedAway)
4503  *MovedAway = false;
4504 
4505  switch (Opcode) {
4506  case Instruction::PtrToInt:
4507  // PtrToInt is always a noop, as we know that the int type is pointer sized.
4508  return matchAddr(AddrInst->getOperand(0), Depth);
4509  case Instruction::IntToPtr: {
4510  auto AS = AddrInst->getType()->getPointerAddressSpace();
4511  auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4512  // This inttoptr is a no-op if the integer type is pointer sized.
4513  if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4514  return matchAddr(AddrInst->getOperand(0), Depth);
4515  return false;
4516  }
4517  case Instruction::BitCast:
4518  // BitCast is always a noop, and we can handle it as long as it is
4519  // int->int or pointer->pointer (we don't want int<->fp or something).
4520  if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4521  // Don't touch identity bitcasts. These were probably put here by LSR,
4522  // and we don't want to mess around with them. Assume it knows what it
4523  // is doing.
4524  AddrInst->getOperand(0)->getType() != AddrInst->getType())
4525  return matchAddr(AddrInst->getOperand(0), Depth);
4526  return false;
4527  case Instruction::AddrSpaceCast: {
4528  unsigned SrcAS
4529  = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4530  unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4531  if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4532  return matchAddr(AddrInst->getOperand(0), Depth);
4533  return false;
4534  }
4535  case Instruction::Add: {
4536  // Check to see if we can merge in the RHS then the LHS. If so, we win.
4537  ExtAddrMode BackupAddrMode = AddrMode;
4538  unsigned OldSize = AddrModeInsts.size();
4539  // Start a transaction at this point.
4540  // The LHS may match but not the RHS.
4541  // Therefore, we need a higher level restoration point to undo a partially
4542  // matched operation.
4543  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4544  TPT.getRestorationPoint();
4545 
4546  AddrMode.InBounds = false;
4547  if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
4548  matchAddr(AddrInst->getOperand(0), Depth+1))
4549  return true;
4550 
4551  // Restore the old addr mode info.
4552  AddrMode = BackupAddrMode;
4553  AddrModeInsts.resize(OldSize);
4554  TPT.rollback(LastKnownGood);
4555 
4556  // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
4557  if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
4558  matchAddr(AddrInst->getOperand(1), Depth+1))
4559  return true;
4560 
4561  // Otherwise we definitely can't merge the ADD in.
4562  AddrMode = BackupAddrMode;
4563  AddrModeInsts.resize(OldSize);
4564  TPT.rollback(LastKnownGood);
4565  break;
4566  }
4567  //case Instruction::Or:
4568  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4569  //break;
4570  case Instruction::Mul:
4571  case Instruction::Shl: {
4572  // Can only handle X*C and X << C.
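// E.g. (illustrative), "%i = shl i64 %x, 3" is matched as %x with Scale = 8,
// assuming the target reports a legal [reg + reg*8] mode for this access.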
4573  AddrMode.InBounds = false;
4574  ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4575  if (!RHS || RHS->getBitWidth() > 64)
4576  return false;
4577  int64_t Scale = Opcode == Instruction::Shl
4578  ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
4579  : RHS->getSExtValue();
4580 
4581  return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4582  }
4583  case Instruction::GetElementPtr: {
4584  // Scan the GEP. We check whether it contains constant offsets and at most
4585  // one variable offset.
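// E.g. (illustrative), "getelementptr [10 x i32], ptr %p, i64 0, i64 %i"
// contributes a constant offset of 0 and one variable index %i scaled by 4.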
4586  int VariableOperand = -1;
4587  unsigned VariableScale = 0;
4588 
4589  int64_t ConstantOffset = 0;
4590  gep_type_iterator GTI = gep_type_begin(AddrInst);
4591  for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4592  if (StructType *STy = GTI.getStructTypeOrNull()) {
4593  const StructLayout *SL = DL.getStructLayout(STy);
4594  unsigned Idx =
4595  cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4596  ConstantOffset += SL->getElementOffset(Idx);
4597  } else {
4598  TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType());
4599  if (TS.isNonZero()) {
4600  // The optimisations below currently only work for fixed offsets.
4601  if (TS.isScalable())
4602  return false;
4603  int64_t TypeSize = TS.getFixedSize();
4604  if (ConstantInt *CI =
4605  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4606  const APInt &CVal = CI->getValue();
4607  if (CVal.getMinSignedBits() <= 64) {
4608  ConstantOffset += CVal.getSExtValue() * TypeSize;
4609  continue;
4610  }
4611  }
4612  // We only allow one variable index at the moment.
4613  if (VariableOperand != -1)
4614  return false;
4615 
4616  // Remember the variable index.
4617  VariableOperand = i;
4618  VariableScale = TypeSize;
4619  }
4620  }
4621  }
4622 
4623  // A common case is for the GEP to only do a constant offset. In this case,
4624  // just add it to the disp field and check validity.
4625  if (VariableOperand == -1) {
4626  AddrMode.BaseOffs += ConstantOffset;
4627  if (ConstantOffset == 0 ||
4628  TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4629  // Check to see if we can fold the base pointer in too.
4630  if (matchAddr(AddrInst->getOperand(0), Depth+1)) {
4631  if (!cast<GEPOperator>(AddrInst)->isInBounds())
4632  AddrMode.InBounds = false;
4633  return true;
4634  }
4635  } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4636  TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4637  ConstantOffset > 0) {
4638  // Record GEPs with non-zero offsets as candidates for splitting in the
4639  // event that the offset cannot fit into the r+i addressing mode.
4640  // Simple and common case that only one GEP is used in calculating the
4641  // address for the memory access.
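// E.g. (illustrative), a GEP with a constant offset of 40000 may not fit the
// immediate field of the target's [reg + imm] mode; recording it here lets
// the offset be split out later so the base can still be reused.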
4642  Value *Base = AddrInst->getOperand(0);
4643  auto *BaseI = dyn_cast<Instruction>(Base);
4644  auto *GEP = cast<GetElementPtrInst>(AddrInst);
4645  if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4646  (BaseI && !isa<CastInst>(BaseI) &&
4647  !isa<GetElementPtrInst>(BaseI))) {
4648  // Make sure the parent block allows inserting non-PHI instructions
4649  // before the terminator.
4650  BasicBlock *Parent =
4651  BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
4652  if (!Parent->getTerminator()->isEHPad())
4653  LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4654  }
4655  }
4656  AddrMode.BaseOffs -= ConstantOffset;
4657  return false;
4658  }
4659 
4660  // Save the valid addressing mode in case we can't match.
4661  ExtAddrMode BackupAddrMode = AddrMode;
4662  unsigned OldSize = AddrModeInsts.size();
4663 
4664  // See if the scale and offset amount is valid for this target.
4665  AddrMode.BaseOffs += ConstantOffset;
4666  if (!cast<GEPOperator>(AddrInst)->isInBounds())
4667  AddrMode.InBounds = false;
4668 
4669  // Match the base operand of the GEP.
4670  if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
4671  // If it couldn't be matched, just stuff the value in a register.
4672  if (AddrMode.HasBaseReg) {
4673  AddrMode = BackupAddrMode;
4674  AddrModeInsts.resize(OldSize);
4675  return false;
4676  }
4677  AddrMode.HasBaseReg = true;
4678  AddrMode.BaseReg = AddrInst->getOperand(0);
4679  }
4680 
4681  // Match the remaining variable portion of the GEP.
4682  if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4683  Depth)) {
4684  // If it couldn't be matched, try stuffing the base into a register
4685  // instead of matching it, and retrying the match of the scale.
4686  AddrMode = BackupAddrMode;
4687  AddrModeInsts.resize(OldSize);
4688  if (AddrMode.HasBaseReg)
4689  return false;
4690  AddrMode.HasBaseReg = true;
4691  AddrMode.BaseReg = AddrInst->getOperand(0);
4692  AddrMode.BaseOffs += ConstantOffset;
4693  if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4694  VariableScale, Depth)) {
4695  // If even that didn't work, bail.
4696  AddrMode = BackupAddrMode;
4697  AddrModeInsts.resize(OldSize);
4698  return false;
4699  }
4700  }
4701 
4702  return true;
4703  }
4704  case Instruction::SExt:
4705  case Instruction::ZExt: {
4706  Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4707  if (!Ext)
4708  return false;
4709 
4710  // Try to move this ext out of the way of the addressing mode.
4711  // Ask for a method for doing so.
4712  TypePromotionHelper::Action TPH =
4713  TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4714  if (!TPH)
4715  return false;
4716 
4717  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4718  TPT.getRestorationPoint();
4719  unsigned CreatedInstsCost = 0;
4720  unsigned ExtCost = !TLI.isExtFree(Ext);
4721  Value *PromotedOperand =
4722  TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4723  // SExt has been moved away.
4724  // Thus either it will be rematched later in the recursive calls or it is
4725  // gone. Anyway, we must not fold it into the addressing mode at this point.
4726  // E.g.,
4727  // op = add opnd, 1
4728  // idx = ext op
4729  // addr = gep base, idx
4730  // is now:
4731  // promotedOpnd = ext opnd <- no match here
4732  // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
4733  // addr = gep base, op <- match
4734  if (MovedAway)
4735  *MovedAway = true;
4736 
4737  assert(PromotedOperand &&
4738  "TypePromotionHelper should have filtered out those cases");
4739 
4740  ExtAddrMode BackupAddrMode = AddrMode;
4741  unsigned OldSize = AddrModeInsts.size();
4742 
4743  if (!matchAddr(PromotedOperand, Depth) ||
4744  // The total of the new cost is equal to the cost of the created
4745  // instructions.
4746  // The total of the old cost is equal to the cost of the extension plus
4747  // what we have saved in the addressing mode.
4748  !isPromotionProfitable(CreatedInstsCost,
4749  ExtCost + (AddrModeInsts.size() - OldSize),
4750  PromotedOperand)) {
4751  AddrMode = BackupAddrMode;
4752  AddrModeInsts.resize(OldSize);
4753  LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4754  TPT.rollback(LastKnownGood);
4755  return false;
4756  }
4757  return true;
4758  }
4759  }
4760  return false;
4761 }
4762 
4763 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4764 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4765 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4766 /// for the target.
4767 ///
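/// For example (illustrative), a target with rich addressing modes can often
/// match "BaseGV + BaseReg + ScaledReg*4 + 16" directly, whereas a target with
/// only [reg] or [reg + imm] modes will reject the scaled component and fall
/// back to keeping it in a register.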
4768 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4769  // Start a transaction at this point that we will rollback if the matching
4770  // fails.
4771  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4772  TPT.getRestorationPoint();
4773  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4774  if (CI->getValue().isSignedIntN(64)) {
4775  // Fold in immediates if legal for the target.
4776  AddrMode.BaseOffs += CI->getSExtValue();
4777  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4778  return true;
4779  AddrMode.BaseOffs -= CI->getSExtValue();
4780  }
4781  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4782  // If this is a global variable, try to fold it into the addressing mode.
4783  if (!AddrMode.BaseGV) {
4784  AddrMode.BaseGV = GV;
4785  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4786  return true;
4787  AddrMode.BaseGV = nullptr;
4788  }
4789  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4790  ExtAddrMode BackupAddrMode = AddrMode;
4791  unsigned OldSize = AddrModeInsts.size();
4792 
4793  // Check to see if it is possible to fold this operation.
4794  bool MovedAway = false;
4795  if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4796  // This instruction may have been moved away. If so, there is nothing
4797  // to check here.
4798  if (MovedAway)
4799  return true;
4800  // Okay, it's possible to fold this. Check to see if it is actually
4801  // *profitable* to do so. We use a simple cost model to avoid increasing
4802  // register pressure too much.
4803  if (I->hasOneUse() ||
4804  isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4805  AddrModeInsts.push_back(I);
4806  return true;
4807  }
4808 
4809  // It isn't profitable to do this, roll back.
4810  AddrMode = BackupAddrMode;
4811  AddrModeInsts.resize(OldSize);
4812  TPT.rollback(LastKnownGood);
4813  }
4814  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4815  if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4816  return true;
4817  TPT.rollback(LastKnownGood);
4818  } else if (isa<ConstantPointerNull>(Addr)) {
4819  // Null pointer gets folded without affecting the addressing mode.
4820  return true;
4821  }
4822 
4823  // Worst case, the target should support [reg] addressing modes. :)
4824  if (!AddrMode.HasBaseReg) {
4825  AddrMode.HasBaseReg = true;
4826  AddrMode.BaseReg = Addr;
4827  // Still check for legality in case the target supports [imm] but not [i+r].
4828  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4829  return true;
4830  AddrMode.HasBaseReg = false;
4831  AddrMode.BaseReg = nullptr;
4832  }
4833 
4834  // If the base register is already taken, see if we can do [r+r].
4835  if (AddrMode.Scale == 0) {
4836  AddrMode.Scale = 1;
4837  AddrMode.ScaledReg = Addr;
4838  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4839  return true;
4840  AddrMode.Scale = 0;
4841  AddrMode.ScaledReg = nullptr;
4842  }
4843  // Couldn't match.
4844  TPT.rollback(LastKnownGood);
4845  return false;
4846 }
4847 
4848 /// Check to see if all uses of OpVal by the specified inline asm call are due
4849 /// to memory operands. If so, return true, otherwise return false.
4850 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4851  const TargetLowering &TLI,
4852  const TargetRegisterInfo &TRI) {
4853  const Function *F = CI->getFunction();
4854  TargetLowering::AsmOperandInfoVector TargetConstraints =
4855  TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
4856 
4857  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
4858  // Compute the constraint code and ConstraintType to use.
4859  TLI.ComputeConstraintToUse(OpInfo, SDValue());
4860 
4861  // If this asm operand is our Value*, and if it isn't an indirect memory
4862  // operand, we can't fold it! TODO: Also handle C_Address?
4863  if (OpInfo.CallOperandVal == OpVal &&
4864  (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4865  !OpInfo.isIndirect))
4866  return false;
4867  }
4868 
4869  return true;
4870 }
4871 
4872 // Max number of memory uses to look at before aborting the search to conserve
4873 // compile time.
4874 static constexpr int MaxMemoryUsesToScan = 20;
4875 
4876 /// Recursively walk all the uses of I until we find a memory use.
4877 /// If we find an obviously non-foldable instruction, return true.
4878 /// Add accessed addresses and types to MemoryUses.
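/// E.g. (illustrative), if "%a = getelementptr i8, ptr %p, i64 8" feeds two
/// loads and a store's pointer operand, each of those uses adds a
/// (%a, accessed type) pair to MemoryUses; storing %a itself as a value,
/// however, makes the walk give up.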
4879 static bool FindAllMemoryUses(
4880  Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
4881  SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4882  const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
4883  BlockFrequencyInfo *BFI, int SeenInsts = 0) {
4884  // If we already considered this instruction, we're done.
4885  if (!ConsideredInsts.insert(I).second)
4886  return false;
4887 
4888  // If this is an obviously unfoldable instruction, bail out.
4889  if (!MightBeFoldableInst(I))
4890  return true;
4891 
4892  // Loop over all the uses, recursively processing them.
4893  for (Use &U : I->uses()) {
4894  // Conservatively return true if we're seeing a large number or a deep chain
4895  // of users. This avoids excessive compilation times in pathological cases.
4896  if (SeenInsts++ >= MaxMemoryUsesToScan)
4897  return true;
4898 
4899  Instruction *UserI = cast<Instruction>(U.getUser());
4900  if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4901  MemoryUses.push_back({U.get(), LI->getType()});
4902  continue;
4903  }
4904 
4905  if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
4906  if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
4907  return true; // Storing addr, not into addr.
4908  MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
4909  continue;
4910  }
4911 
4912  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
4913  if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
4914  return true; // Storing addr, not into addr.
4915  MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
4916  continue;
4917  }
4918 
4919  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
4920  if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
4921  return true; // Storing addr, not into addr.
4922  MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
4923  continue;
4924  }
4925 
4926  if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
4927  if (CI->hasFnAttr(Attribute::Cold)) {
4928  // If this is a cold call, we can sink the addressing calculation into
4929  // the cold path. See optimizeCallInst().
4930  bool OptForSize = OptSize ||
4931  llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
4932  if (!OptForSize)
4933  continue;
4934  }
4935 
4936  InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
4937  if (!IA) return true;
4938 
4939  // If this is a memory operand, we're cool, otherwise bail out.
4940  if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
4941  return true;
4942  continue;
4943  }
4944 
4945  if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
4946  PSI, BFI, SeenInsts))
4947  return true;
4948  }
4949 
4950  return false;
4951 }
4952 
4953 /// Return true if Val is already known to be live at the use site that we're
4954 /// folding it into. If so, there is no cost to include it in the addressing
4955 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
4956 /// instruction already.
4957 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
4958  Value *KnownLive2) {
4959  // If Val is either of the known-live values, we know it is live!
4960  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
4961  return true;
4962 
4963  // All values other than instructions and arguments (e.g. constants) are live.
4964  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
4965 
4966  // If Val is a constant-sized alloca in the entry block, it is live; this is
4967  // true because it is just a reference to the stack/frame pointer, which is
4968  // live for the whole function.
4969  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
4970  if (AI->isStaticAlloca())
4971  return true;
4972 
4973  // Check to see if this value is already used in the memory instruction's
4974  // block. If so, it's already live into the block at the very least, so we
4975  // can reasonably fold it.
4976  return Val->isUsedInBasicBlock(MemoryInst->getParent());
4977 }
4978 
4979 /// It is possible for the addressing mode of the machine to fold the specified
4980 /// instruction into a load or store that ultimately uses it.
4981 /// However, the specified instruction has multiple uses.
4982 /// Given this, it may actually increase register pressure to fold it
4983 /// into the load. For example, consider this code:
4984 ///
4985 /// X = ...
4986 /// Y = X+1
4987 /// use(Y) -> nonload/store
4988 /// Z = Y+1
4989 /// load Z
4990 ///
4991 /// In this case, Y has multiple uses, and can be folded into the load of Z
4992 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
4993 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
4994 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
4995 /// number of computations either.
4996 ///
4997 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
4998 /// X was live across 'load Z' for other reasons, we actually *would* want to
4999 /// fold the addressing mode in the Z case. This would make Y die earlier.
5000 bool AddressingModeMatcher::
5001 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
5002  ExtAddrMode &AMAfter) {
5003  if (IgnoreProfitability) return true;
5004 
5005  // AMBefore is the addressing mode before this instruction was folded into it,
5006  // and AMAfter is the addressing mode after the instruction was folded. Get
5007  // the set of registers referenced by AMAfter and subtract out those
5008  // referenced by AMBefore: this is the set of values which folding in this
5009  // address extends the lifetime of.
5010  //
5011  // Note that there are only two potential values being referenced here,
5012  // BaseReg and ScaleReg (global addresses are always available, as are any
5013  // folded immediates).
5014  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5015 
5016  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5017  // lifetime wasn't extended by adding this instruction.
5018  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5019  BaseReg = nullptr;
5020  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5021  ScaledReg = nullptr;
5022 
5023  // If folding this instruction (and its subexprs) didn't extend any live
5024  // ranges, we're ok with it.
5025  if (!BaseReg && !ScaledReg)
5026  return true;
5027 
5028  // If all uses of this instruction can have the address mode sunk into them,
5029  // we can remove the addressing mode and effectively trade one live register
5030  // for another (at worst.) In this context, folding an addressing mode into
5031  // the use is just a particularly nice way of sinking it.
5032  SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
5033  SmallPtrSet<Instruction*, 16> ConsideredInsts;
5034  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5035  PSI, BFI))
5036  return false; // Has a non-memory, non-foldable use!
5037 
5038  // Now that we know that all uses of this instruction are part of a chain of
5039  // computation involving only operations that could theoretically be folded
5040  // into a memory use, loop over each of these memory operation uses and see
5041  // if they could *actually* fold the instruction. The assumption is that
5042  // addressing modes are cheap and that duplicating the computation involved
5043  // many times is worthwhile, even on a fastpath. For sinking candidates
5044  // (i.e. cold call sites), this serves as a way to prevent excessive code
5045  // growth since most architectures have some reasonably small and fast way to
5046  // compute an effective address. (e.g., LEA on x86)
5047  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
5048  for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
5049  Value *Address = Pair.first;
5050  Type *AddressAccessTy = Pair.second;
5051  unsigned AS = Address->getType()->getPointerAddressSpace();
5052 
5053  // Do a match against the root of this address, ignoring profitability. This
5054  // will tell us if the addressing mode for the memory operation will
5055  // *actually* cover the shared instruction.
5056  ExtAddrMode Result;
5057  std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5058  0);
5059  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5060  TPT.getRestorationPoint();
5061  AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5062  AddressAccessTy, AS, MemoryInst, Result,
5063  InsertedInsts, PromotedInsts, TPT,
5064  LargeOffsetGEP, OptSize, PSI, BFI);
5065  Matcher.IgnoreProfitability = true;
5066  bool Success = Matcher.matchAddr(Address, 0);
5067  (void)Success; assert(Success && "Couldn't select *anything*?");
5068 
5069  // The match was only to check profitability; the changes made are not
5070  // part of the original matcher. Therefore, they should be dropped,
5071  // otherwise the original matcher will not present the right state.
5072  TPT.rollback(LastKnownGood);
5073 
5074  // If the match didn't cover I, then it won't be shared by it.
5075  if (!is_contained(MatchedAddrModeInsts, I))
5076  return false;
5077 
5078  MatchedAddrModeInsts.clear();
5079  }
5080 
5081  return true;
5082 }
5083 
5084 /// Return true if the specified values are defined in a
5085 /// different basic block than BB.
5086 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5087  if (Instruction *I = dyn_cast<Instruction>(V))
5088  return I->getParent() != BB;
5089  return false;
5090 }
5091 
5092 /// Sink addressing mode computation immediately before MemoryInst if doing so
5093 /// can be done without increasing register pressure. The need for the
5094 /// register pressure constraint means this can end up being an all or nothing
5095 /// decision for all uses of the same addressing computation.
5096 ///
5097 /// Load and Store Instructions often have addressing modes that can do
5098 /// significant amounts of computation. As such, instruction selection will try
5099 /// to get the load or store to do as much computation as possible for the
5100 /// program. The problem is that isel can only see within a single block. As
5101 /// such, we sink as much legal addressing mode work into the block as possible.
5102 ///
5103 /// This method is used to optimize both load/store and inline asms with memory
5104 /// operands. It's also used to sink addressing computations feeding into cold
5105 /// call sites into their (cold) basic block.
5106 ///
5107 /// The motivation for handling sinking into cold blocks is that doing so can
5108 /// both enable other address mode sinking (by satisfying the register pressure
5109 /// constraint above), and reduce register pressure globally (by removing the
5110 /// addressing mode computation from the fast path entirely).
5111 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5112  Type *AccessTy, unsigned AddrSpace) {
5113  Value *Repl = Addr;
5114 
5115  // Try to collapse single-value PHI nodes. This is necessary to undo
5116  // unprofitable PRE transformations.
5117  SmallVector<Value*, 8> worklist;
5118  SmallPtrSet<Value*, 16> Visited;
5119  worklist.push_back(Addr);
5120 
5121  // Use a worklist to iteratively look through PHI and select nodes, and
5122  // ensure that the addressing modes obtained from the non-PHI/select roots of
5123  // the graph are compatible.
5124  bool PhiOrSelectSeen = false;
5125  SmallVector<Instruction*, 16> AddrModeInsts;
5126  const SimplifyQuery SQ(*DL, TLInfo);
5127  AddressingModeCombiner AddrModes(SQ, Addr);
5128  TypePromotionTransaction TPT(RemovedInsts);
5129  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5130  TPT.getRestorationPoint();
5131  while (!worklist.empty()) {
5132  Value *V = worklist.pop_back_val();
5133 
5134  // We allow traversing cyclic Phi nodes.
5135  // In case of success after this loop we ensure that traversing through
5136  // Phi nodes ends up with all cases computing an address of the form
5137  // BaseGV + Base + Scale * Index + Offset
5138  // where Scale and Offset are constants and BaseGV, Base and Index
5139  // are exactly the same Values in all cases.
5140  // It means that BaseGV, Scale and Offset dominate our memory instruction
5141  // and have the same value as they had in address computation represented
5142  // as Phi. So we can safely sink address computation to memory instruction.
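// E.g. (illustrative), if both incoming values of a phi are GEPs of the form
// "%base + 4 * %idx", the combiner below can agree on a single addressing
// mode and the whole computation can be rebuilt next to MemoryInst.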
5143  if (!Visited.insert(V).second)
5144  continue;
5145 
5146  // For a PHI node, push all of its incoming values.
5147  if (PHINode *P = dyn_cast<PHINode>(V)) {
5148  append_range(worklist, P->incoming_values());
5149  PhiOrSelectSeen = true;
5150  continue;
5151  }
5152  // Similar for select.
5153  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5154  worklist.push_back(SI->getFalseValue());
5155  worklist.push_back(SI->getTrueValue());
5156  PhiOrSelectSeen = true;
5157  continue;
5158  }
5159 
5160  // For non-PHIs, determine the addressing mode being computed. Note that
5161  // the result may differ depending on what other uses our candidate
5162  // addressing instructions might have.
5163  AddrModeInsts.clear();
5164  std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5165  0);
5166  // Defer the query (and possible computation) of the dom tree to the point of
5167  // actual use. It's expected that most address matches don't actually need
5168  // the domtree.
5169  auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5170  Function *F = MemoryInst->getParent()->getParent();
5171  return this->getDT(*F);
5172  };
5173  ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5174  V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5175  *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5176  BFI.get());
5177 
5178  GetElementPtrInst *GEP = LargeOffsetGEP.first;
5179  if (GEP && !NewGEPBases.count(GEP)) {
5180  // If splitting the underlying data structure can reduce the offset of a
5181  // GEP, collect the GEP. Skip the GEPs that are the new bases of
5182  // previously split data structures.
5183  LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5184  LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5185  }
5186 
5187  NewAddrMode.OriginalValue = V;
5188  if (!AddrModes.addNewAddrMode(NewAddrMode))
5189  break;
5190  }
5191 
5192  // Try to combine the AddrModes we've collected. If we couldn't collect any,
5193  // or we have multiple but either couldn't combine them or combining them
5194  // wouldn't do anything useful, bail out now.
5195  if (!AddrModes.combineAddrModes()) {
5196  TPT.rollback(LastKnownGood);
5197  return false;
5198  }
5199  bool Modified = TPT.commit();
5200 
5201  // Get the combined AddrMode (or the only AddrMode, if we only had one).
5202  ExtAddrMode AddrMode = AddrModes.getAddrMode();
5203 
5204  // If all the instructions matched are already in this BB, don't do anything.
5205  // If we saw a Phi node then it is definitely not local, and if we saw a select
5206  // then we want to push the address calculation past it even if it's already
5207  // in this BB.
5208  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5209  return IsNonLocalValue(V, MemoryInst->getParent());
5210  })) {
5211  LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
5212  << "\n");
5213  return Modified;
5214  }
5215 
5216  // Insert this computation right after this user. Since our caller is
5217  // scanning from the top of the BB to the bottom, reuses of the expr are
5218  // guaranteed to happen later.
5219  IRBuilder<> Builder(MemoryInst);
5220 
5221  // Now that we've determined the addressing expression we want to use and
5222  // know that we have to sink it into this block, check to see if we have already
5223  // done this for some other load/store instr in this block. If so, reuse
5224  // the computation. Before attempting reuse, check if the address is valid
5225  // as it may have been erased.
5226 
5227  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5228 
5229  Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5230  if (SunkAddr) {
5231  LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5232  << " for " << *MemoryInst << "\n");
5233  if (SunkAddr->getType() != Addr->getType())
5234  SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5235  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5236  SubtargetInfo->addrSinkUsingGEPs())) {
5237  // By default, we use the GEP-based method when AA is used later. This
5238  // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
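// The sunk address is rebuilt below as byte-addressed GEPs, e.g.
// (illustrative): "%sunkaddr = getelementptr i8, ptr %base, i64 <index>".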
5239  LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5240  << " for " << *MemoryInst << "\n");
5241  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5242  Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5243 
5244  // First, find the pointer.
5245  if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5246  ResultPtr = AddrMode.BaseReg;
5247  AddrMode.BaseReg = nullptr;
5248  }
5249 
5250  if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5251  // We can't add more than one pointer together, nor can we scale a
5252  // pointer (both of which seem meaningless).
5253  if (ResultPtr || AddrMode.Scale != 1)
5254  return Modified;
5255 
5256  ResultPtr = AddrMode.ScaledReg;
5257  AddrMode.Scale = 0;
5258  }
5259 
5260  // It is only safe to sign extend the BaseReg if we know that the math
5261  // required to create it did not overflow before we extend it. Since
5262  // the original IR value was tossed in favor of a constant back when
5263  // the AddrMode was created we need to bail out gracefully if widths
5264  // do not match instead of extending it.
5265  //
5266  // (See below for code to add the scale.)
5267  if (AddrMode.Scale) {
5268  Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5269  if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5270  cast<IntegerType>(ScaledRegTy)->getBitWidth())
5271  return Modified;
5272  }
5273 
5274  if (AddrMode.BaseGV) {
5275  if (ResultPtr)
5276  return Modified;
5277 
5278  ResultPtr = AddrMode.BaseGV;
5279  }
5280 
5281  // If the real base value actually came from an inttoptr, then the matcher
5282  // will look through it and provide only the integer value. In that case,
5283  // use it here.
5284  if (!DL->isNonIntegralPointerType(Addr->getType())) {
5285  if (!ResultPtr && AddrMode.BaseReg) {
5286  ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5287  "sunkaddr");
5288  AddrMode.BaseReg = nullptr;
5289  } else if (!ResultPtr && AddrMode.Scale == 1) {
5290  ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5291  "sunkaddr");
5292  AddrMode.Scale = 0;
5293  }
5294  }
5295 
5296  if (!ResultPtr &&
5297  !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
5298  SunkAddr = Constant::getNullValue(Addr->getType());
5299  } else if (!ResultPtr) {
5300  return Modified;
5301  } else {
5302  Type *I8PtrTy =
5303  Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
5304  Type *I8Ty = Builder.getInt8Ty();
5305 
5306  // Start with the base register. Do this first so that subsequent address
5307  // matching finds it last, which will prevent it from trying to match it
5308  // as the scaled value in case it happens to be a mul. That would be
5309  // problematic if we've sunk a different mul for the scale, because then
5310  // we'd end up sinking both muls.
5311  if (AddrMode.BaseReg) {
5312  Value *V = AddrMode.BaseReg;
5313  if (V->getType() != IntPtrTy)
5314  V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5315 
5316  ResultIndex = V;
5317  }
5318 
5319  // Add the scale value.
5320  if (AddrMode.Scale) {
5321  Value *V = AddrMode.ScaledReg;
5322  if (V->getType() == IntPtrTy) {
5323  // done.
5324  } else {
5325  assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5326  cast<IntegerType>(V->getType())->getBitWidth() &&
5327  "We can't transform if ScaledReg is too narrow");
5328  V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5329  }
5330 
5331  if (AddrMode.Scale != 1)
5332  V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5333  "sunkaddr");
5334  if (ResultIndex)
5335  ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5336  else
5337  ResultIndex = V;
5338  }
5339 
5340  // Add in the Base Offset if present.
5341  if (AddrMode.BaseOffs) {
5342  Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5343  if (ResultIndex) {
5344  // We need to add this separately from the scale above to help with
5345  // SDAG consecutive load/store merging.
5346  if (ResultPtr->getType() != I8PtrTy)
5347  ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5348  ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex,
5349  "sunkaddr", AddrMode.InBounds);
5350  }
5351 
5352  ResultIndex = V;
5353  }
5354 
5355  if (!ResultIndex) {
5356  SunkAddr = ResultPtr;
5357  } else {
5358  if (ResultPtr->getType() != I8PtrTy)
5359  ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5360  SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr",
5361  AddrMode.InBounds);
5362  }
5363 
5364  if (SunkAddr->getType() != Addr->getType())
5365  SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5366  }
5367  } else {
5368  // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5369  // non-integral pointers, so in that case bail out now.
5370  Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5371  Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5372  PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5373  PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5374  if (DL->isNonIntegralPointerType(Addr->getType()) ||
5375  (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5376  (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5377  (AddrMode.BaseGV &&
5378  DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5379  return Modified;
5380 
5381  LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5382  << " for " << *MemoryInst << "\n");
5383  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5384  Value *Result = nullptr;
5385 
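// Here the address is instead rebuilt with integer arithmetic, e.g.
// (illustrative):
//   %b = ptrtoint ptr %base to i64
//   %s = mul i64 %idx, 8
//   %sunkaddr0 = add i64 %b, %s
// followed by an inttoptr back to the original pointer type.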
5386  // Start with the base register. Do this first so that subsequent address
5387  // matching finds it last, which will prevent it from trying to match it
5388  // as the scaled value in case it happens to be a mul. That would be
5389  // problematic if we've sunk a different mul for the scale, because then
5390  // we'd end up sinking both muls.
5391  if (AddrMode.BaseReg) {
5392  Value *V = AddrMode.BaseReg;
5393  if (V->getType()->isPointerTy())
5394  V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5395  if (V->getType() != IntPtrTy)
5396  V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5397  Result = V;
5398  }
5399 
5400  // Add the scale value.
5401  if (AddrMode.Scale) {
5402  Value *V = AddrMode.ScaledReg;
5403  if (V->getType() == IntPtrTy) {
5404  // done.
5405  } else if (V->getType()->isPointerTy()) {
5406  V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5407  } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5408  cast<IntegerType>(V->getType())->getBitWidth()) {
5409  V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5410  } else {
5411  // It is only safe to sign extend the BaseReg if we know that the math
5412  // required to create it did not overflow before we extend it. Since
5413  // the original IR value was tossed in favor of a constant back when
5414  // the AddrMode was created we need to bail out gracefully if widths
5415  // do not match instead of extending it.
5416  Instruction *I = dyn_cast_or_null<Instruction>(Result);
5417  if (I && (Result != AddrMode.BaseReg))
5418  I->eraseFromParent();