// InstCombinePHI.cpp — extracted from the LLVM 14.0.0git doxygen source listing.
1 //===- InstCombinePHI.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitPHINode function.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/Statistic.h"
19 #include "llvm/IR/PatternMatch.h"
23 
24 using namespace llvm;
25 using namespace llvm::PatternMatch;
26 
27 #define DEBUG_TYPE "instcombine"
28 
29 static cl::opt<unsigned>
30 MaxNumPhis("instcombine-max-num-phis", cl::init(512),
31  cl::desc("Maximum number phis to handle in intptr/ptrint folding"));
32 
33 STATISTIC(NumPHIsOfInsertValues,
34  "Number of phi-of-insertvalue turned into insertvalue-of-phis");
35 STATISTIC(NumPHIsOfExtractValues,
36  "Number of phi-of-extractvalue turned into extractvalue-of-phi");
37 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
38 
39 /// The PHI arguments will be folded into a single operation with a PHI node
40 /// as input. The debug location of the single operation will be the merged
41 /// locations of the original PHI node arguments.
43  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
44  Inst->setDebugLoc(FirstInst->getDebugLoc());
45  // We do not expect a CallInst here, otherwise, N-way merging of DebugLoc
46  // will be inefficient.
47  assert(!isa<CallInst>(Inst));
48 
49  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
50  auto *I = cast<Instruction>(PN.getIncomingValue(i));
51  Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
52  }
53 }
54 
55 // Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
56 // If there is an existing pointer typed PHI that produces the same value as PN,
57 // replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
58 // PHI node:
59 //
60 // Case-1:
61 // bb1:
62 // int_init = PtrToInt(ptr_init)
63 // br label %bb2
64 // bb2:
65 // int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
66 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
67 // ptr_val2 = IntToPtr(int_val)
68 // ...
69 // use(ptr_val2)
70 // ptr_val_inc = ...
71 // inc_val_inc = PtrToInt(ptr_val_inc)
72 //
73 // ==>
74 // bb1:
75 // br label %bb2
76 // bb2:
77 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
78 // ...
79 // use(ptr_val)
80 // ptr_val_inc = ...
81 //
82 // Case-2:
83 // bb1:
84 // int_ptr = BitCast(ptr_ptr)
85 // int_init = Load(int_ptr)
86 // br label %bb2
87 // bb2:
88 // int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
89 // ptr_val2 = IntToPtr(int_val)
90 // ...
91 // use(ptr_val2)
92 // ptr_val_inc = ...
93 // inc_val_inc = PtrToInt(ptr_val_inc)
94 // ==>
95 // bb1:
96 // ptr_init = Load(ptr_ptr)
97 // br label %bb2
98 // bb2:
99 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
100 // ...
101 // use(ptr_val)
102 // ptr_val_inc = ...
103 // ...
104 //
106  if (!PN.getType()->isIntegerTy())
107  return nullptr;
108  if (!PN.hasOneUse())
109  return nullptr;
110 
111  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
112  if (!IntToPtr)
113  return nullptr;
114 
115  // Check if the pointer is actually used as pointer:
116  auto HasPointerUse = [](Instruction *IIP) {
117  for (User *U : IIP->users()) {
118  Value *Ptr = nullptr;
119  if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
120  Ptr = LoadI->getPointerOperand();
121  } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
122  Ptr = SI->getPointerOperand();
123  } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
124  Ptr = GI->getPointerOperand();
125  }
126 
127  if (Ptr && Ptr == IIP)
128  return true;
129  }
130  return false;
131  };
132 
133  if (!HasPointerUse(IntToPtr))
134  return nullptr;
135 
136  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
137  DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
138  return nullptr;
139 
140  SmallVector<Value *, 4> AvailablePtrVals;
141  for (unsigned i = 0; i != PN.getNumIncomingValues(); ++i) {
142  Value *Arg = PN.getIncomingValue(i);
143 
144  // First look backward:
145  if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
146  AvailablePtrVals.emplace_back(PI->getOperand(0));
147  continue;
148  }
149 
150  // Next look forward:
151  Value *ArgIntToPtr = nullptr;
152  for (User *U : Arg->users()) {
153  if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
154  (DT.dominates(cast<Instruction>(U), PN.getIncomingBlock(i)) ||
155  cast<Instruction>(U)->getParent() == PN.getIncomingBlock(i))) {
156  ArgIntToPtr = U;
157  break;
158  }
159  }
160 
161  if (ArgIntToPtr) {
162  AvailablePtrVals.emplace_back(ArgIntToPtr);
163  continue;
164  }
165 
166  // If Arg is defined by a PHI, allow it. This will also create
167  // more opportunities iteratively.
168  if (isa<PHINode>(Arg)) {
169  AvailablePtrVals.emplace_back(Arg);
170  continue;
171  }
172 
173  // For a single use integer load:
174  auto *LoadI = dyn_cast<LoadInst>(Arg);
175  if (!LoadI)
176  return nullptr;
177 
178  if (!LoadI->hasOneUse())
179  return nullptr;
180 
181  // Push the integer typed Load instruction into the available
182  // value set, and fix it up later when the pointer typed PHI
183  // is synthesized.
184  AvailablePtrVals.emplace_back(LoadI);
185  }
186 
187  // Now search for a matching PHI
188  auto *BB = PN.getParent();
189  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
190  "Not enough available ptr typed incoming values");
191  PHINode *MatchingPtrPHI = nullptr;
192  unsigned NumPhis = 0;
193  for (auto II = BB->begin(); II != BB->end(); II++, NumPhis++) {
194  // FIXME: consider handling this in AggressiveInstCombine
195  PHINode *PtrPHI = dyn_cast<PHINode>(II);
196  if (!PtrPHI)
197  break;
198  if (NumPhis > MaxNumPhis)
199  return nullptr;
200  if (PtrPHI == &PN || PtrPHI->getType() != IntToPtr->getType())
201  continue;
202  MatchingPtrPHI = PtrPHI;
203  for (unsigned i = 0; i != PtrPHI->getNumIncomingValues(); ++i) {
204  if (AvailablePtrVals[i] !=
206  MatchingPtrPHI = nullptr;
207  break;
208  }
209  }
210 
211  if (MatchingPtrPHI)
212  break;
213  }
214 
215  if (MatchingPtrPHI) {
216  assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
217  "Phi's Type does not match with IntToPtr");
218  // The PtrToCast + IntToPtr will be simplified later
219  return CastInst::CreateBitOrPointerCast(MatchingPtrPHI,
220  IntToPtr->getOperand(0)->getType());
221  }
222 
223  // If it requires a conversion for every PHI operand, do not do it.
224  if (all_of(AvailablePtrVals, [&](Value *V) {
225  return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
226  }))
227  return nullptr;
228 
229  // If any of the operand that requires casting is a terminator
230  // instruction, do not do it. Similarly, do not do the transform if the value
231  // is PHI in a block with no insertion point, for example, a catchswitch
232  // block, since we will not be able to insert a cast after the PHI.
233  if (any_of(AvailablePtrVals, [&](Value *V) {
234  if (V->getType() == IntToPtr->getType())
235  return false;
236  auto *Inst = dyn_cast<Instruction>(V);
237  if (!Inst)
238  return false;
239  if (Inst->isTerminator())
240  return true;
241  auto *BB = Inst->getParent();
242  if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
243  return true;
244  return false;
245  }))
246  return nullptr;
247 
248  PHINode *NewPtrPHI = PHINode::Create(
249  IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");
250 
251  InsertNewInstBefore(NewPtrPHI, PN);
253  for (unsigned i = 0; i != PN.getNumIncomingValues(); ++i) {
254  auto *IncomingBB = PN.getIncomingBlock(i);
255  auto *IncomingVal = AvailablePtrVals[i];
256 
257  if (IncomingVal->getType() == IntToPtr->getType()) {
258  NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
259  continue;
260  }
261 
262 #ifndef NDEBUG
263  LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
264  assert((isa<PHINode>(IncomingVal) ||
265  IncomingVal->getType()->isPointerTy() ||
266  (LoadI && LoadI->hasOneUse())) &&
267  "Can not replace LoadInst with multiple uses");
268 #endif
269  // Need to insert a BitCast.
270  // For an integer Load instruction with a single use, the load + IntToPtr
271  // cast will be simplified into a pointer load:
272  // %v = load i64, i64* %a.ip, align 8
273  // %v.cast = inttoptr i64 %v to float **
274  // ==>
275  // %v.ptrp = bitcast i64 * %a.ip to float **
276  // %v.cast = load float *, float ** %v.ptrp, align 8
277  Instruction *&CI = Casts[IncomingVal];
278  if (!CI) {
279  CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
280  IncomingVal->getName() + ".ptr");
281  if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
282  BasicBlock::iterator InsertPos(IncomingI);
283  InsertPos++;
284  BasicBlock *BB = IncomingI->getParent();
285  if (isa<PHINode>(IncomingI))
286  InsertPos = BB->getFirstInsertionPt();
287  assert(InsertPos != BB->end() && "should have checked above");
288  InsertNewInstBefore(CI, *InsertPos);
289  } else {
290  auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
291  InsertNewInstBefore(CI, *InsertBB->getFirstInsertionPt());
292  }
293  }
294  NewPtrPHI->addIncoming(CI, IncomingBB);
295  }
296 
297  // The PtrToCast + IntToPtr will be simplified later
298  return CastInst::CreateBitOrPointerCast(NewPtrPHI,
299  IntToPtr->getOperand(0)->getType());
300 }
301 
302 // Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
303 // fold Phi-operand to bitcast.
305  // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
306  // Make sure all uses of phi are ptr2int.
307  if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
308  return nullptr;
309 
310  // Iterating over all operands to check presence of target pointers for
311  // optimization.
312  bool OperandWithRoundTripCast = false;
313  for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
314  if (auto *NewOp =
315  simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
316  PN.setIncomingValue(OpNum, NewOp);
317  OperandWithRoundTripCast = true;
318  }
319  }
320  if (!OperandWithRoundTripCast)
321  return nullptr;
322  return &PN;
323 }
324 
325 /// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
326 /// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
327 Instruction *
329  auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));
330 
331  // Scan to see if all operands are `insertvalue`'s with the same indicies,
332  // and all have a single use.
333  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
334  auto *I = dyn_cast<InsertValueInst>(PN.getIncomingValue(i));
335  if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
336  return nullptr;
337  }
338 
339  // For each operand of an `insertvalue`
340  std::array<PHINode *, 2> NewOperands;
341  for (int OpIdx : {0, 1}) {
342  auto *&NewOperand = NewOperands[OpIdx];
343  // Create a new PHI node to receive the values the operand has in each
344  // incoming basic block.
345  NewOperand = PHINode::Create(
346  FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
347  FirstIVI->getOperand(OpIdx)->getName() + ".pn");
348  // And populate each operand's PHI with said values.
349  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
350  NewOperand->addIncoming(
351  cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
352  std::get<0>(Incoming));
353  InsertNewInstBefore(NewOperand, PN);
354  }
355 
356  // And finally, create `insertvalue` over the newly-formed PHI nodes.
357  auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
358  FirstIVI->getIndices(), PN.getName());
359 
360  PHIArgMergedDebugLoc(NewIVI, PN);
361  ++NumPHIsOfInsertValues;
362  return NewIVI;
363 }
364 
365 /// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
366 /// turn this into a phi[a,b] and a single extractvalue.
367 Instruction *
369  auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));
370 
371  // Scan to see if all operands are `extractvalue`'s with the same indicies,
372  // and all have a single use.
373  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
374  auto *I = dyn_cast<ExtractValueInst>(PN.getIncomingValue(i));
375  if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
376  I->getAggregateOperand()->getType() !=
377  FirstEVI->getAggregateOperand()->getType())
378  return nullptr;
379  }
380 
381  // Create a new PHI node to receive the values the aggregate operand has
382  // in each incoming basic block.
383  auto *NewAggregateOperand = PHINode::Create(
384  FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
385  FirstEVI->getAggregateOperand()->getName() + ".pn");
386  // And populate the PHI with said values.
387  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
388  NewAggregateOperand->addIncoming(
389  cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
390  std::get<0>(Incoming));
391  InsertNewInstBefore(NewAggregateOperand, PN);
392 
393  // And finally, create `extractvalue` over the newly-formed PHI nodes.
394  auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
395  FirstEVI->getIndices(), PN.getName());
396 
397  PHIArgMergedDebugLoc(NewEVI, PN);
398  ++NumPHIsOfExtractValues;
399  return NewEVI;
400 }
401 
402 /// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
403 /// adds all have a single user, turn this into a phi and a single binop.
405  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
406  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
407  unsigned Opc = FirstInst->getOpcode();
408  Value *LHSVal = FirstInst->getOperand(0);
409  Value *RHSVal = FirstInst->getOperand(1);
410 
411  Type *LHSType = LHSVal->getType();
412  Type *RHSType = RHSVal->getType();
413 
414  // Scan to see if all operands are the same opcode, and all have one user.
415  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
416  Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
417  if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
418  // Verify type of the LHS matches so we don't fold cmp's of different
419  // types.
420  I->getOperand(0)->getType() != LHSType ||
421  I->getOperand(1)->getType() != RHSType)
422  return nullptr;
423 
424  // If they are CmpInst instructions, check their predicates
425  if (CmpInst *CI = dyn_cast<CmpInst>(I))
426  if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
427  return nullptr;
428 
429  // Keep track of which operand needs a phi node.
430  if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
431  if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
432  }
433 
434  // If both LHS and RHS would need a PHI, don't do this transformation,
435  // because it would increase the number of PHIs entering the block,
436  // which leads to higher register pressure. This is especially
437  // bad when the PHIs are in the header of a loop.
438  if (!LHSVal && !RHSVal)
439  return nullptr;
440 
441  // Otherwise, this is safe to transform!
442 
443  Value *InLHS = FirstInst->getOperand(0);
444  Value *InRHS = FirstInst->getOperand(1);
445  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
446  if (!LHSVal) {
447  NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
448  FirstInst->getOperand(0)->getName() + ".pn");
449  NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
450  InsertNewInstBefore(NewLHS, PN);
451  LHSVal = NewLHS;
452  }
453 
454  if (!RHSVal) {
455  NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
456  FirstInst->getOperand(1)->getName() + ".pn");
457  NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
458  InsertNewInstBefore(NewRHS, PN);
459  RHSVal = NewRHS;
460  }
461 
462  // Add all operands to the new PHIs.
463  if (NewLHS || NewRHS) {
464  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
465  Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
466  if (NewLHS) {
467  Value *NewInLHS = InInst->getOperand(0);
468  NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
469  }
470  if (NewRHS) {
471  Value *NewInRHS = InInst->getOperand(1);
472  NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
473  }
474  }
475  }
476 
477  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
478  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
479  LHSVal, RHSVal);
480  PHIArgMergedDebugLoc(NewCI, PN);
481  return NewCI;
482  }
483 
484  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
485  BinaryOperator *NewBinOp =
486  BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
487 
488  NewBinOp->copyIRFlags(PN.getIncomingValue(0));
489 
490  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
491  NewBinOp->andIRFlags(PN.getIncomingValue(i));
492 
493  PHIArgMergedDebugLoc(NewBinOp, PN);
494  return NewBinOp;
495 }
496 
498  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
499 
500  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
501  FirstInst->op_end());
502  // This is true if all GEP bases are allocas and if all indices into them are
503  // constants.
504  bool AllBasePointersAreAllocas = true;
505 
506  // We don't want to replace this phi if the replacement would require
507  // more than one phi, which leads to higher register pressure. This is
508  // especially bad when the PHIs are in the header of a loop.
509  bool NeededPhi = false;
510 
511  bool AllInBounds = true;
512 
513  // Scan to see if all operands are the same opcode, and all have one user.
514  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
516  dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
517  if (!GEP || !GEP->hasOneUser() || GEP->getType() != FirstInst->getType() ||
518  GEP->getNumOperands() != FirstInst->getNumOperands())
519  return nullptr;
520 
521  AllInBounds &= GEP->isInBounds();
522 
523  // Keep track of whether or not all GEPs are of alloca pointers.
524  if (AllBasePointersAreAllocas &&
525  (!isa<AllocaInst>(GEP->getOperand(0)) ||
526  !GEP->hasAllConstantIndices()))
527  AllBasePointersAreAllocas = false;
528 
529  // Compare the operand lists.
530  for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
531  if (FirstInst->getOperand(op) == GEP->getOperand(op))
532  continue;
533 
534  // Don't merge two GEPs when two operands differ (introducing phi nodes)
535  // if one of the PHIs has a constant for the index. The index may be
536  // substantially cheaper to compute for the constants, so making it a
537  // variable index could pessimize the path. This also handles the case
538  // for struct indices, which must always be constant.
539  if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
540  isa<ConstantInt>(GEP->getOperand(op)))
541  return nullptr;
542 
543  if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
544  return nullptr;
545 
546  // If we already needed a PHI for an earlier operand, and another operand
547  // also requires a PHI, we'd be introducing more PHIs than we're
548  // eliminating, which increases register pressure on entry to the PHI's
549  // block.
550  if (NeededPhi)
551  return nullptr;
552 
553  FixedOperands[op] = nullptr; // Needs a PHI.
554  NeededPhi = true;
555  }
556  }
557 
558  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
559  // bother doing this transformation. At best, this will just save a bit of
560  // offset calculation, but all the predecessors will have to materialize the
561  // stack address into a register anyway. We'd actually rather *clone* the
562  // load up into the predecessors so that we have a load of a gep of an alloca,
563  // which can usually all be folded into the load.
564  if (AllBasePointersAreAllocas)
565  return nullptr;
566 
567  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
568  // that is variable.
569  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
570 
571  bool HasAnyPHIs = false;
572  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
573  if (FixedOperands[i]) continue; // operand doesn't need a phi.
574  Value *FirstOp = FirstInst->getOperand(i);
575  PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
576  FirstOp->getName()+".pn");
577  InsertNewInstBefore(NewPN, PN);
578 
579  NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
580  OperandPhis[i] = NewPN;
581  FixedOperands[i] = NewPN;
582  HasAnyPHIs = true;
583  }
584 
585 
586  // Add all operands to the new PHIs.
587  if (HasAnyPHIs) {
588  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
589  GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
590  BasicBlock *InBB = PN.getIncomingBlock(i);
591 
592  for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
593  if (PHINode *OpPhi = OperandPhis[op])
594  OpPhi->addIncoming(InGEP->getOperand(op), InBB);
595  }
596  }
597 
598  Value *Base = FixedOperands[0];
599  GetElementPtrInst *NewGEP =
601  makeArrayRef(FixedOperands).slice(1));
602  if (AllInBounds) NewGEP->setIsInBounds();
603  PHIArgMergedDebugLoc(NewGEP, PN);
604  return NewGEP;
605 }
606 
607 /// Return true if we know that it is safe to sink the load out of the block
608 /// that defines it. This means that it must be obvious the value of the load is
609 /// not changed from the point of the load to the end of the block it is in.
610 ///
611 /// Finally, it is safe, but not profitable, to sink a load targeting a
612 /// non-address-taken alloca. Doing so will cause us to not promote the alloca
613 /// to a register.
615  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
616 
617  for (++BBI; BBI != E; ++BBI)
618  if (BBI->mayWriteToMemory()) {
619  // Calls that only access inaccessible memory do not block sinking the
620  // load.
621  if (auto *CB = dyn_cast<CallBase>(BBI))
622  if (CB->onlyAccessesInaccessibleMemory())
623  continue;
624  return false;
625  }
626 
627  // Check for non-address taken alloca. If not address-taken already, it isn't
628  // profitable to do this xform.
629  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
630  bool isAddressTaken = false;
631  for (User *U : AI->users()) {
632  if (isa<LoadInst>(U)) continue;
633  if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
634  // If storing TO the alloca, then the address isn't taken.
635  if (SI->getOperand(1) == AI) continue;
636  }
637  isAddressTaken = true;
638  break;
639  }
640 
641  if (!isAddressTaken && AI->isStaticAlloca())
642  return false;
643  }
644 
645  // If this load is a load from a GEP with a constant offset from an alloca,
646  // then we don't want to sink it. In its present form, it will be
647  // load [constant stack offset]. Sinking it will cause us to have to
648  // materialize the stack addresses in each predecessor in a register only to
649  // do a shared load from register in the successor.
650  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
651  if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
652  if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
653  return false;
654 
655  return true;
656 }
657 
659  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
660 
661  // FIXME: This is overconservative; this transform is allowed in some cases
662  // for atomic operations.
663  if (FirstLI->isAtomic())
664  return nullptr;
665 
666  // When processing loads, we need to propagate two bits of information to the
667  // sunk load: whether it is volatile, and what its alignment is. We currently
668  // don't sink loads when some have their alignment specified and some don't.
669  // visitLoadInst will propagate an alignment onto the load when TD is around,
670  // and if TD isn't around, we can't handle the mixed case.
671  bool isVolatile = FirstLI->isVolatile();
672  Align LoadAlignment = FirstLI->getAlign();
673  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
674 
675  // We can't sink the load if the loaded value could be modified between the
676  // load and the PHI.
677  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
679  return nullptr;
680 
681  // If the PHI is of volatile loads and the load block has multiple
682  // successors, sinking it would remove a load of the volatile value from
683  // the path through the other successor.
684  if (isVolatile &&
685  FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
686  return nullptr;
687 
688  // Check to see if all arguments are the same operation.
689  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
690  LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
691  if (!LI || !LI->hasOneUser())
692  return nullptr;
693 
694  // We can't sink the load if the loaded value could be modified between
695  // the load and the PHI.
696  if (LI->isVolatile() != isVolatile ||
697  LI->getParent() != PN.getIncomingBlock(i) ||
698  LI->getPointerAddressSpace() != LoadAddrSpace ||
700  return nullptr;
701 
702  LoadAlignment = std::min(LoadAlignment, Align(LI->getAlign()));
703 
704  // If the PHI is of volatile loads and the load block has multiple
705  // successors, sinking it would remove a load of the volatile value from
706  // the path through the other successor.
707  if (isVolatile &&
708  LI->getParent()->getTerminator()->getNumSuccessors() != 1)
709  return nullptr;
710  }
711 
712  // Okay, they are all the same operation. Create a new PHI node of the
713  // correct type, and PHI together all of the LHS's of the instructions.
714  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
716  PN.getName()+".in");
717 
718  Value *InVal = FirstLI->getOperand(0);
719  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
720  LoadInst *NewLI =
721  new LoadInst(FirstLI->getType(), NewPN, "", isVolatile, LoadAlignment);
722 
723  unsigned KnownIDs[] = {
724  LLVMContext::MD_tbaa,
725  LLVMContext::MD_range,
726  LLVMContext::MD_invariant_load,
727  LLVMContext::MD_alias_scope,
728  LLVMContext::MD_noalias,
729  LLVMContext::MD_nonnull,
730  LLVMContext::MD_align,
731  LLVMContext::MD_dereferenceable,
732  LLVMContext::MD_dereferenceable_or_null,
733  LLVMContext::MD_access_group,
734  };
735 
736  for (unsigned ID : KnownIDs)
737  NewLI->setMetadata(ID, FirstLI->getMetadata(ID));
738 
739  // Add all operands to the new PHI and combine TBAA metadata.
740  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
741  LoadInst *LI = cast<LoadInst>(PN.getIncomingValue(i));
742  combineMetadata(NewLI, LI, KnownIDs, true);
743  Value *NewInVal = LI->getOperand(0);
744  if (NewInVal != InVal)
745  InVal = nullptr;
746  NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
747  }
748 
749  if (InVal) {
750  // The new PHI unions all of the same values together. This is really
751  // common, so we handle it intelligently here for compile-time speed.
752  NewLI->setOperand(0, InVal);
753  delete NewPN;
754  } else {
755  InsertNewInstBefore(NewPN, PN);
756  }
757 
758  // If this was a volatile load that we are merging, make sure to loop through
759  // and mark all the input loads as non-volatile. If we don't do this, we will
760  // insert a new volatile load and the old ones will not be deletable.
761  if (isVolatile)
762  for (Value *IncValue : PN.incoming_values())
763  cast<LoadInst>(IncValue)->setVolatile(false);
764 
765  PHIArgMergedDebugLoc(NewLI, PN);
766  return NewLI;
767 }
768 
769 /// TODO: This function could handle other cast types, but then it might
770 /// require special-casing a cast from the 'i1' type. See the comment in
771 /// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
773  // We cannot create a new instruction after the PHI if the terminator is an
774  // EHPad because there is no valid insertion point.
775  if (Instruction *TI = Phi.getParent()->getTerminator())
776  if (TI->isEHPad())
777  return nullptr;
778 
779  // Early exit for the common case of a phi with two operands. These are
780  // handled elsewhere. See the comment below where we check the count of zexts
781  // and constants for more details.
782  unsigned NumIncomingValues = Phi.getNumIncomingValues();
783  if (NumIncomingValues < 3)
784  return nullptr;
785 
786  // Find the narrower type specified by the first zext.
787  Type *NarrowType = nullptr;
788  for (Value *V : Phi.incoming_values()) {
789  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
790  NarrowType = Zext->getSrcTy();
791  break;
792  }
793  }
794  if (!NarrowType)
795  return nullptr;
796 
797  // Walk the phi operands checking that we only have zexts or constants that
798  // we can shrink for free. Store the new operands for the new phi.
799  SmallVector<Value *, 4> NewIncoming;
800  unsigned NumZexts = 0;
801  unsigned NumConsts = 0;
802  for (Value *V : Phi.incoming_values()) {
803  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
804  // All zexts must be identical and have one user.
805  if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
806  return nullptr;
807  NewIncoming.push_back(Zext->getOperand(0));
808  NumZexts++;
809  } else if (auto *C = dyn_cast<Constant>(V)) {
810  // Make sure that constants can fit in the new type.
811  Constant *Trunc = ConstantExpr::getTrunc(C, NarrowType);
812  if (ConstantExpr::getZExt(Trunc, C->getType()) != C)
813  return nullptr;
814  NewIncoming.push_back(Trunc);
815  NumConsts++;
816  } else {
817  // If it's not a cast or a constant, bail out.
818  return nullptr;
819  }
820  }
821 
822  // The more common cases of a phi with no constant operands or just one
823  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
824  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
825  // performed here. It tries to replicate a cast in the phi operand's basic
826  // block to expose other folding opportunities. Thus, InstCombine will
827  // infinite loop without this check.
828  if (NumConsts == 0 || NumZexts < 2)
829  return nullptr;
830 
831  // All incoming values are zexts or constants that are safe to truncate.
832  // Create a new phi node of the narrow type, phi together all of the new
833  // operands, and zext the result back to the original type.
834  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
835  Phi.getName() + ".shrunk");
836  for (unsigned i = 0; i != NumIncomingValues; ++i)
837  NewPhi->addIncoming(NewIncoming[i], Phi.getIncomingBlock(i));
838 
839  InsertNewInstBefore(NewPhi, Phi);
840  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
841 }
842 
/// If all operands to a PHI node are the same "unary" operator and they all are
/// only used by the PHI, PHI together their inputs, and do the operation once,
/// to the result of the PHI.
///
/// \param PN the PHI whose incoming values may all be the same operation.
/// \returns a replacement instruction (not yet inserted into a block), or
///          nullptr if the transform does not apply.
Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
  // We cannot create a new instruction after the PHI if the terminator is an
  // EHPad because there is no valid insertion point.
  if (Instruction *TI = PN.getParent()->getTerminator())
    if (TI->isEHPad())
      return nullptr;

  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  // Dispatch to the specialized folders for instruction kinds that need
  // custom handling.
  if (isa<GetElementPtrInst>(FirstInst))
    return foldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return foldPHIArgLoadIntoPHI(PN);
  if (isa<InsertValueInst>(FirstInst))
    return foldPHIArgInsertValueInstructionIntoPHI(PN);
  if (isa<ExtractValueInst>(FirstInst))
    return foldPHIArgExtractValueInstructionIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs.  We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!shouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return foldPHIArgBinOpIntoPHI(PN);
  } else {
    return nullptr;  // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr; // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      // All binops/compares must share the exact same constant RHS.
      return nullptr;
    }
  }

  // Okay, they are all the same operation.  Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.  InVal is reset to null as soon as two
  // distinct inputs are seen, so a non-null InVal afterwards means the PHI
  // would be trivial.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together.  This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    PHIArgMergedDebugLoc(NewCI, PN);
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    // The merged op may only keep poison-generating flags that are common to
    // every incoming instruction, hence copy-then-intersect.
    BinOp->copyIRFlags(PN.getIncomingValue(0));

    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
      BinOp->andIRFlags(PN.getIncomingValue(i));

    PHIArgMergedDebugLoc(BinOp, PN);
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  PHIArgMergedDebugLoc(NewCI, PN);
  return NewCI;
}
956 
957 /// Return true if this PHI node is only used by a PHI node cycle that is dead.
958 static bool DeadPHICycle(PHINode *PN,
959  SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
960  if (PN->use_empty()) return true;
961  if (!PN->hasOneUse()) return false;
962 
963  // Remember this node, and if we find the cycle, return.
964  if (!PotentiallyDeadPHIs.insert(PN).second)
965  return true;
966 
967  // Don't scan crazily complex things.
968  if (PotentiallyDeadPHIs.size() == 16)
969  return false;
970 
971  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
972  return DeadPHICycle(PU, PotentiallyDeadPHIs);
973 
974  return false;
975 }
976 
977 /// Return true if this phi node is always equal to NonPhiInVal.
978 /// This happens with mutually cyclic phi nodes like:
979 /// z = some value; x = phi (y, z); y = phi (x, z)
980 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
981  SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
982  // See if we already saw this PHI node.
983  if (!ValueEqualPHIs.insert(PN).second)
984  return true;
985 
986  // Don't scan crazily complex things.
987  if (ValueEqualPHIs.size() == 16)
988  return false;
989 
990  // Scan the operands to see if they are either phi nodes or are equal to
991  // the value.
992  for (Value *Op : PN->incoming_values()) {
993  if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
994  if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
995  return false;
996  } else if (Op != NonPhiInVal)
997  return false;
998  }
999 
1000  return true;
1001 }
1002 
1003 /// Return an existing non-zero constant if this phi node has one, otherwise
1004 /// return constant 1.
1006  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
1007  for (Value *V : PN.operands())
1008  if (auto *ConstVA = dyn_cast<ConstantInt>(V))
1009  if (!ConstVA->isZero())
1010  return ConstVA;
1011  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
1012 }
1013 
namespace {
// Records one extracting use of a PHI: the trunc instruction 'Inst' reads the
// bits of PHI number 'PHIId' starting at bit offset 'Shift'.  Collected and
// sorted by SliceUpIllegalIntegerPHI below.
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
    : PHIId(pn), Shift(Sh), Inst(User) {}

  // Order by PHI id, then shift offset, then extracted width, so that sorting
  // groups all extracts of the same piece next to each other.
  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

// Map key identifying one narrow PHI produced while slicing: the original
// wide PHI plus the bit offset and width of the extracted piece.
struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
    : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap (empty/tombstone keys carry a null PN).
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
    : PN(pn), Shift(Sh), Width(0) {}
};
} // namespace
1046 
namespace llvm {
  // Teach DenseMap how to hash and compare LoweredPHIRecord keys.  The empty
  // and tombstone keys use a null PHI with Shift 0/1; real entries always
  // carry a non-null PN, so there is no collision with user keys.
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      // Shift and Width are folded in with their low 3 bits dropped,
      // XOR'ed into the pointer hash of the PHI.
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
} // namespace llvm
1067 
1068 
/// This is an integer PHI and we know that it has an illegal type: see if it is
/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
/// the various pieces being extracted. This sort of thing is introduced when
/// SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be an bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
///
Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset.  These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from.  PHIsToSlice is a set we use to avoid
  // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to
  // check the uses of (to ensure they are all extracts).
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  // Worklist loop: PHIsToSlice may grow while we iterate, as PHI users of a
  // PHI are discovered and queued.
  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI.  If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be split the critical
    // edge which is required to insert a truncate.  Because of this, we have to
    // bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (!II) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate.  Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    // Every use of every queued PHI must be an extract (trunc or
    // trunc(lshr-by-constant)) or another PHI; anything else aborts.
    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      // Bail on out of range shifts.
      unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
      if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }

  // If we have no users, they must be all self uses, just nuke the PHI.
  if (PHIUsers.empty())
    return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it.  First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
             for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i) dbgs()
             << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';);

  // PredValues - This is a temporary used when rewriting PHI nodes.  It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;

  // Note: PHIUsers may grow inside this loop (see ++UserE below) when an
  // extract inserted into a predecessor reads another PHI being sliced.
  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, Create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName()+".off"+Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // NOTE(review): dyn_cast<PHINode>(PN) always succeeds since PN is
        // already a PHINode*, so InPHI == PN and the (PN, Offset, Ty) map
        // entry was just found null above — this branch looks like dead
        // code.  Presumably dyn_cast<PHINode>(InVal) was intended; confirm
        // against upstream before changing.
        if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote it, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder.SetInsertPoint(Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder.CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                         Offset), "extract");
        Res = Builder.CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted.  This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId =
                find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      LLVM_DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                        << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with poison.
  Value *Poison = PoisonValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    replaceInstUsesWith(*PHIsToSlice[i], Poison);
  return replaceInstUsesWith(FirstPhi, Poison);
}
1250 
/// Try to replace a two-input boolean phi with the condition (or negated
/// condition) of a dominating conditional branch.  Returns the replacement
/// value, or nullptr if the pattern does not match.
static Value *SimplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
                                       const DominatorTree &DT) {
  // Simplify the following patterns:
  //       if (cond)
  //       /     \
  //      ...   ...
  //       \     /
  //    phi [true] [false]
  // Only i1 phis with exactly two constant inputs qualify.
  if (!PN.getType()->isIntegerTy(1))
    return nullptr;

  if (PN.getNumOperands() != 2)
    return nullptr;

  // Make sure all inputs are constants.
  if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
    return nullptr;

  BasicBlock *BB = PN.getParent();
  // Do not bother with unreachable instructions.
  if (!DT.isReachableFromEntry(BB))
    return nullptr;

  // Same inputs.
  if (PN.getOperand(0) == PN.getOperand(1))
    return PN.getOperand(0);

  // Identify which predecessor feeds the true input and which the false one.
  BasicBlock *TruePred = nullptr, *FalsePred = nullptr;
  for (auto *Pred : predecessors(BB)) {
    auto *Input = cast<ConstantInt>(PN.getIncomingValueForBlock(Pred));
    if (Input->isAllOnesValue())
      TruePred = Pred;
    else
      FalsePred = Pred;
  }
  assert(TruePred && FalsePred && "Must be!");

  // Check which edge of the dominator dominates the true input.  If it is the
  // false edge, we should invert the condition.
  auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
  auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
  if (!BI || BI->isUnconditional())
    return nullptr;

  // Check that edges outgoing from the idom's terminators dominate respective
  // inputs of the Phi.
  BasicBlockEdge TrueOutEdge(IDom, BI->getSuccessor(0));
  BasicBlockEdge FalseOutEdge(IDom, BI->getSuccessor(1));

  BasicBlockEdge TrueIncEdge(TruePred, BB);
  BasicBlockEdge FalseIncEdge(FalsePred, BB);

  auto *Cond = BI->getCondition();
  if (DT.dominates(TrueOutEdge, TrueIncEdge) &&
      DT.dominates(FalseOutEdge, FalseIncEdge))
    // This Phi is actually equivalent to branching condition of IDom.
    return Cond;
  else if (DT.dominates(TrueOutEdge, FalseIncEdge) &&
           DT.dominates(FalseOutEdge, TrueIncEdge)) {
    // This Phi is actually opposite to branching condition of IDom.  We invert
    // the condition that will potentially open up some opportunities for
    // sinking.
    auto InsertPt = BB->getFirstInsertionPt();
    if (InsertPt != BB->end()) {
      Self.Builder.SetInsertPoint(&*InsertPt);
      return Self.Builder.CreateNot(Cond);
    }
  }

  return nullptr;
}
1322 
// PHINode simplification
//
// Main InstCombine entry point for PHI nodes.  Tries a fixed sequence of
// folds; returns a replacement instruction, &PN when PN was modified in
// place, or nullptr when nothing changed.
Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
    return replaceInstUsesWith(PN, V);

  if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
    return Result;

  if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
    return Result;

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
      cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      PN.getIncomingValue(0)->hasOneUser())
    if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
      return Result;

  // If the incoming values are pointer casts of the same original value,
  // replace the phi with a single cast iff we can insert a non-PHI instruction.
  if (PN.getType()->isPointerTy() &&
      PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
    Value *IV0 = PN.getIncomingValue(0);
    Value *IV0Stripped = IV0->stripPointerCasts();
    // Set to keep track of values known to be equal to IV0Stripped after
    // stripping pointer casts.
    SmallPtrSet<Value *, 4> CheckedIVs;
    CheckedIVs.insert(IV0);
    if (IV0 != IV0Stripped &&
        all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
          return !CheckedIVs.insert(IV).second ||
                 IV0Stripped == IV->stripPointerCasts();
        })) {
      return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
    }
  }

  // If this is a trivial cycle in the PHI node graph, remove it.  Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    if (Instruction *Result = foldIntegerTypedPHI(PN))
      return Result;

    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi.  This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);".  Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
    }
    // When a PHI is used only to be compared with zero, it is safe to replace
    // an incoming value proved as known nonzero with any non-zero constant.
    // For example, in the code below, the incoming value %v can be replaced
    // with any non-zero constant based on the fact that the PHI is only used to
    // be compared with zero and %v is a known non-zero value:
    // %v = select %cond, 1, 2
    // %p = phi [%v, BB] ...
    // icmp eq, %p, 0
    auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
    // FIXME: To be simple, handle only integer type for now.
    if (CmpInst && isa<IntegerType>(PN.getType()) && CmpInst->isEquality() &&
        match(CmpInst->getOperand(1), m_Zero())) {
      ConstantInt *NonZeroConst = nullptr;
      bool MadeChange = false;
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
        // The incoming block's terminator is the context point at which the
        // value must be non-zero for the replacement to be valid.
        Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
        Value *VA = PN.getIncomingValue(i);
        if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
          if (!NonZeroConst)
            NonZeroConst = GetAnyNonZeroConstInt(PN);

          if (NonZeroConst != VA) {
            replaceOperand(PN, i, NonZeroConst);
            MadeChange = true;
          }
        }
      }
      if (MadeChange)
        return &PN;
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block.  Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values.  Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return replaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order.  This will help identical PHIs be eliminated
  // by other passes.  Other passes shouldn't depend on this for correctness
  // however.
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        // Swap entries i and j so PN's block order matches FirstPN's.
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction.  However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }

  // Is there an identical PHI node in this basic block?
  for (PHINode &IdenticalPN : PN.getParent()->phis()) {
    // Ignore the PHI node itself.
    if (&IdenticalPN == &PN)
      continue;
    // Note that even though we've just canonicalized this PHI, due to the
    // worklist visitation order, there are no guarantess that *every* PHI
    // has been canonicalized, so we can't just compare operands ranges.
    if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
      continue;
    // Just use that PHI instead then.
    ++NumPHICSEs;
    return replaceInstUsesWith(PN, &IdenticalPN);
  }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations.  If so, we split the
  // PHI into the various pieces being extracted.  This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() &&
      !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  // Ultimately, try to replace this Phi with a dominating condition.
  if (auto *V = SimplifyUsingControlFlow(*this, PN, DT))
    return replaceInstUsesWith(PN, V);

  return nullptr;
}
i
i
Definition: README.txt:29
llvm::array_pod_sort
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition: STLExtras.h:1450
llvm::BasicBlock::end
iterator end()
Definition: BasicBlock.h:298
llvm::predecessors
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:127
llvm::GetElementPtrInst::setIsInBounds
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Definition: Instructions.cpp:1810
llvm::IRBuilderBase::SetInsertPoint
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:184
llvm::combineMetadata
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:2484
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
llvm::DenseMapInfo< LoweredPHIRecord >::getHashValue
static unsigned getHashValue(const LoweredPHIRecord &Val)
Definition: InstCombinePHI.cpp:1056
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2719
llvm::Value::hasOneUse
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
InstCombiner.h
llvm::User::operands
op_range operands()
Definition: User.h:242
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:90
llvm::InstCombinerImpl::foldPHIArgBinOpIntoPHI
Instruction * foldPHIArgBinOpIntoPHI(PHINode &PN)
If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the adds all have a single user,...
Definition: InstCombinePHI.cpp:404
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:217
llvm::ConstantExpr::getZExt
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2123
op
#define op(i)
llvm::InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI
Instruction * foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [extractvalue(a,0), extractvalue(b,0)], turn this into a phi[a,...
Definition: InstCombinePHI.cpp:368
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::InstCombiner::Builder
BuilderTy & Builder
Definition: InstCombiner.h:56
Statistic.h
llvm::InsertValueInst::Create
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:2530
llvm::CastInst::Create
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Definition: Instructions.cpp:3076
llvm::SmallDenseMap
Definition: DenseMap.h:880
ValueTracking.h
Local.h
llvm::InstCombinerImpl::foldPHIArgGEPIntoPHI
Instruction * foldPHIArgGEPIntoPHI(PHINode &PN)
Definition: InstCombinePHI.cpp:497
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::isKnownNonZero
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
Definition: ValueTracking.cpp:305
Shift
bool Shift
Definition: README.txt:468
llvm::CmpInst::isEquality
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Definition: Instructions.cpp:3790
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::BasicBlockEdge
Definition: Dominators.h:83
llvm::DominatorTreeBase::getNode
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Definition: GenericDomTree.h:351
PHIsEqualValue
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, SmallPtrSetImpl< PHINode * > &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
Definition: InstCombinePHI.cpp:980
llvm::Value::hasOneUser
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:449
llvm::CmpInst::getOpcode
OtherOps getOpcode() const
Get the opcode casted to the right type.
Definition: InstrTypes.h:791
STLExtras.h
llvm::DomTreeNodeBase::getIDom
DomTreeNodeBase * getIDom() const
Definition: GenericDomTree.h:89
llvm::LoadInst::getAlign
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:223
llvm::PHINode::setIncomingValue
void setIncomingValue(unsigned i, Value *V)
Definition: Instructions.h:2732
llvm::InstCombinerImpl::foldPHIArgIntToPtrToPHI
Instruction * foldPHIArgIntToPtrToPHI(PHINode &PN)
Definition: InstCombinePHI.cpp:304
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::GetElementPtrInst::getSourceElementType
Type * getSourceElementType() const
Definition: Instructions.h:1021
llvm::Instruction::setMetadata
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1336
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
llvm::DenseMapInfo< LoweredPHIRecord >::getEmptyKey
static LoweredPHIRecord getEmptyKey()
Definition: InstCombinePHI.cpp:1050
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:115
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
CommandLine.h
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::Instruction::getNumSuccessors
unsigned getNumSuccessors() const
Return the number of successors that this instruction has.
Definition: Instruction.cpp:765
llvm::DenseMapInfo
Definition: APInt.h:34
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:160
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1551
InstCombineInternal.h
DeadPHICycle
static bool DeadPHICycle(PHINode *PN, SmallPtrSetImpl< PHINode * > &PotentiallyDeadPHIs)
Return true if this PHI node is only used by a PHI node cycle that is dead.
Definition: InstCombinePHI.cpp:958
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Definition: Instructions.h:2729
llvm::InstCombinerImpl::PHIArgMergedDebugLoc
void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN)
Helper function for FoldPHIArgXIntoPHI() to set debug location for the folded operation.
Definition: InstCombinePHI.cpp:42
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::User
Definition: User.h:44
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::DenseMapInfo< LoweredPHIRecord >::isEqual
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
Definition: InstCombinePHI.cpp:1060
llvm::InstCombinerImpl::foldPHIArgLoadIntoPHI
Instruction * foldPHIArgLoadIntoPHI(PHINode &PN)
Definition: InstCombinePHI.cpp:658
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:296
llvm::Instruction::isIdenticalToWhenDefined
bool isIdenticalToWhenDefined(const Instruction *I) const
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Definition: Instruction.cpp:495
llvm::Instruction::applyMergedLocation
void applyMergedLocation(const DILocation *LocA, const DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
Definition: DebugInfo.cpp:815
GetAnyNonZeroConstInt
static ConstantInt * GetAnyNonZeroConstInt(PHINode &PN)
Return an existing non-zero constant if this phi node has one, otherwise return constant 1.
Definition: InstCombinePHI.cpp:1005
llvm::BasicBlock::getFirstInsertionPt
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:253
llvm::PHINode::getIncomingValueForBlock
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: Instructions.h:2818
llvm::BinaryOperator::getOpcode
BinaryOps getOpcode() const
Definition: InstrTypes.h:393
llvm::Instruction
Definition: Instruction.h:45
llvm::Type::getScalarSizeInBits
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition: Type.cpp:191
llvm::SimplifyInstruction
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
Definition: InstructionSimplify.cpp:6328
llvm::BasicBlock::phis
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:354
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:925
SmallPtrSet.h
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
PatternMatch.h
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2725
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:344
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::InvokeInst
Invoke instruction.
Definition: Instructions.h:3749
llvm::CmpInst
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:710
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:190
llvm::InstCombinerImpl::foldPHIArgOpIntoPHI
Instruction * foldPHIArgOpIntoPHI(PHINode &PN)
Try to rotate an operation below a PHI node, using PHI nodes for its operands.
Definition: InstCombinePHI.cpp:846
llvm::InstCombinerImpl::visitPHINode
Instruction * visitPHINode(PHINode &PN)
Definition: InstCombinePHI.cpp:1325
llvm::Instruction::andIRFlags
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
Definition: Instruction.cpp:313
llvm::ConstantExpr::getTrunc
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2095
llvm::cl::opt
Definition: CommandLine.h:1432
llvm::LoadInst::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:273
llvm::PatternMatch::m_Zero
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:535
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:304
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::DenseMapInfo< LoweredPHIRecord >::getTombstoneKey
static LoweredPHIRecord getTombstoneKey()
Definition: InstCombinePHI.cpp:1053
llvm::DomTreeNodeBase::getBlock
NodeT * getBlock() const
Definition: GenericDomTree.h:88
llvm::ExtractValueInst::Create
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:2406
llvm::find
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1571
llvm::Instruction::user_back
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:91
llvm::PHINode::setIncomingBlock
void setIncomingBlock(unsigned i, BasicBlock *BB)
Definition: Instructions.h:2768
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2783
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI
Instruction * foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)], turn this into a phi[a,...
Definition: InstCombinePHI.cpp:328
llvm::DenseMap
Definition: DenseMap.h:714
llvm::operator<
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:338
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::GetElementPtrInst
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:928
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::PHINode::getBasicBlockIndex
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Definition: Instructions.h:2811
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:367
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::DominatorTree::isReachableFromEntry
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:328
llvm::zip
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&... args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:736
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:650
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::GetElementPtrInst::Create
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:954
llvm::BinaryOperator
Definition: InstrTypes.h:189
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1558
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:179
llvm::PHINode::blocks
iterator_range< block_iterator > blocks()
Definition: Instructions.h:2711
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
if
if(llvm_vc STREQUAL "") set(fake_version_inc "$
Definition: CMakeLists.txt:14
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::CastInst
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:430
MaxNumPhis
static cl::opt< unsigned > MaxNumPhis("instcombine-max-num-phis", cl::init(512), cl::desc("Maximum number phis to handle in intptr/ptrint folding"))
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:175
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:152
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:687
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:92
j
return j(j<< 16)
llvm::Instruction::isAtomic
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
Definition: Instruction.cpp:604
llvm::InstCombinerImpl::foldIntegerTypedPHI
Instruction * foldIntegerTypedPHI(PHINode &PN)
If an integer typed PHI has only one use which is an IntToPtr operation, replace the PHI with an exis...
Definition: InstCombinePHI.cpp:105
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:83
llvm::CastInst::CreateBitOrPointerCast
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
Definition: Instructions.cpp:3231
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:324
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2675
SimplifyUsingControlFlow
static Value * SimplifyUsingControlFlow(InstCombiner &Self, PHINode &PN, const DominatorTree &DT)
Definition: InstCombinePHI.cpp:1251
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::InstCombinerImpl::foldPHIArgZextsIntoPHI
Instruction * foldPHIArgZextsIntoPHI(PHINode &PN)
TODO: This function could handle other cast types, but then it might require special-casing a cast fr...
Definition: InstCombinePHI.cpp:772
llvm::InstCombinerImpl::SliceUpIllegalIntegerPHI
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
Definition: InstCombinePHI.cpp:1077
llvm::InstCombiner
The core instruction combiner logic.
Definition: InstCombiner.h:45
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:413
llvm::codeview::VB
@ VB
Definition: CodeView.h:158
llvm::CmpInst::Create
static CmpInst * Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Definition: Instructions.cpp:3747
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:476
llvm::User::getNumOperands
unsigned getNumOperands() const
Definition: User.h:191
llvm::LoadInst::isVolatile
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:212
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:370
llvm::User::op_begin
op_iterator op_begin()
Definition: User.h:234
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
InstructionSimplify.h
llvm::CmpInst::getPredicate
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:796
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition: Instructions.h:2749
llvm::PHINode
Definition: Instructions.h:2633
llvm::PatternMatch
Definition: PatternMatch.h:47
llvm::Instruction::copyIRFlags
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
Definition: Instruction.cpp:289
llvm::IRBuilderBase::CreateNot
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1617
llvm::SmallPtrSetImpl< PHINode * >
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
GEP
Hexagon Common GEP
Definition: HexagonCommonGEP.cpp:172
llvm::CastInst::CreatePointerCast
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast AddrSpaceCast, or a PtrToInt cast instruction.
Definition: Instructions.cpp:3168
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:62
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::CastInst::CreateZExtOrBitCast
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
Definition: Instructions.cpp:3120
llvm::cl::desc
Definition: CommandLine.h:412
llvm::BinaryOperator::Create
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Definition: Instructions.cpp:2674
isSafeAndProfitableToSinkLoad
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it.
Definition: InstCombinePHI.cpp:614
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:421
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:166
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37
llvm::User::op_end
op_iterator op_end()
Definition: User.h:236
llvm::PoisonValue::get
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1815