InstCombinePHI.cpp
1 //===- InstCombinePHI.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitPHINode function.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Transforms/InstCombine/InstCombiner.h"
22 #include "llvm/Transforms/Utils/Local.h"
23 #include <optional>
24 
25 using namespace llvm;
26 using namespace llvm::PatternMatch;
27 
28 #define DEBUG_TYPE "instcombine"
29 
30 static cl::opt<unsigned>
31 MaxNumPhis("instcombine-max-num-phis", cl::init(512),
32  cl::desc("Maximum number of phis to handle in intptr/ptrint folding"));
33 
34 STATISTIC(NumPHIsOfInsertValues,
35  "Number of phi-of-insertvalue turned into insertvalue-of-phis");
36 STATISTIC(NumPHIsOfExtractValues,
37  "Number of phi-of-extractvalue turned into extractvalue-of-phi");
38 STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");
39 
40 /// The PHI arguments will be folded into a single operation with a PHI node
41 /// as input. The debug location of the single operation will be the merged
42 /// locations of the original PHI node arguments.
43 void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
44  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
45  Inst->setDebugLoc(FirstInst->getDebugLoc());
46  // We do not expect a CallInst here; otherwise, N-way merging of DebugLoc
47  // would be inefficient.
48  assert(!isa<CallInst>(Inst));
49 
50  for (Value *V : drop_begin(PN.incoming_values())) {
51  auto *I = cast<Instruction>(V);
52  Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
53  }
54 }
55 
56 // Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
57 // If there is an existing pointer typed PHI that produces the same value as PN,
58 // replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
59 // PHI node:
60 //
61 // Case-1:
62 // bb1:
63 // int_init = PtrToInt(ptr_init)
64 // br label %bb2
65 // bb2:
66 // int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
67 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
68 // ptr_val2 = IntToPtr(int_val)
69 // ...
70 // use(ptr_val2)
71 // ptr_val_inc = ...
72 // inc_val_inc = PtrToInt(ptr_val_inc)
73 //
74 // ==>
75 // bb1:
76 // br label %bb2
77 // bb2:
78 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
79 // ...
80 // use(ptr_val)
81 // ptr_val_inc = ...
82 //
83 // Case-2:
84 // bb1:
85 // int_ptr = BitCast(ptr_ptr)
86 // int_init = Load(int_ptr)
87 // br label %bb2
88 // bb2:
89 // int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
90 // ptr_val2 = IntToPtr(int_val)
91 // ...
92 // use(ptr_val2)
93 // ptr_val_inc = ...
94 // inc_val_inc = PtrToInt(ptr_val_inc)
95 // ==>
96 // bb1:
97 // ptr_init = Load(ptr_ptr)
98 // br label %bb2
99 // bb2:
100 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
101 // ...
102 // use(ptr_val)
103 // ptr_val_inc = ...
104 // ...
105 //
106 bool InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {
107  if (!PN.getType()->isIntegerTy())
108  return false;
109  if (!PN.hasOneUse())
110  return false;
111 
112  auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
113  if (!IntToPtr)
114  return false;
115 
116  // Check if the pointer is actually used as a pointer:
117  auto HasPointerUse = [](Instruction *IIP) {
118  for (User *U : IIP->users()) {
119  Value *Ptr = nullptr;
120  if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
121  Ptr = LoadI->getPointerOperand();
122  } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
123  Ptr = SI->getPointerOperand();
124  } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
125  Ptr = GI->getPointerOperand();
126  }
127 
128  if (Ptr && Ptr == IIP)
129  return true;
130  }
131  return false;
132  };
133 
134  if (!HasPointerUse(IntToPtr))
135  return false;
136 
137  if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
138  DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
139  return false;
140 
141  SmallVector<Value *, 4> AvailablePtrVals;
142  for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
143  BasicBlock *BB = std::get<0>(Incoming);
144  Value *Arg = std::get<1>(Incoming);
145 
146  // First look backward:
147  if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
148  AvailablePtrVals.emplace_back(PI->getOperand(0));
149  continue;
150  }
151 
152  // Next look forward:
153  Value *ArgIntToPtr = nullptr;
154  for (User *U : Arg->users()) {
155  if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
156  (DT.dominates(cast<Instruction>(U), BB) ||
157  cast<Instruction>(U)->getParent() == BB)) {
158  ArgIntToPtr = U;
159  break;
160  }
161  }
162 
163  if (ArgIntToPtr) {
164  AvailablePtrVals.emplace_back(ArgIntToPtr);
165  continue;
166  }
167 
168  // If Arg is defined by a PHI, allow it. This will also create
169  // more opportunities iteratively.
170  if (isa<PHINode>(Arg)) {
171  AvailablePtrVals.emplace_back(Arg);
172  continue;
173  }
174 
175  // For a single use integer load:
176  auto *LoadI = dyn_cast<LoadInst>(Arg);
177  if (!LoadI)
178  return false;
179 
180  if (!LoadI->hasOneUse())
181  return false;
182 
183  // Push the integer typed Load instruction into the available
184  // value set, and fix it up later when the pointer typed PHI
185  // is synthesized.
186  AvailablePtrVals.emplace_back(LoadI);
187  }
188 
189  // Now search for a matching PHI
190  auto *BB = PN.getParent();
191  assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
192  "Not enough available ptr typed incoming values");
193  PHINode *MatchingPtrPHI = nullptr;
194  unsigned NumPhis = 0;
195  for (PHINode &PtrPHI : BB->phis()) {
196  // FIXME: consider handling this in AggressiveInstCombine
197  if (NumPhis++ > MaxNumPhis)
198  return false;
199  if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
200  continue;
201  if (any_of(zip(PN.blocks(), AvailablePtrVals),
202  [&](const auto &BlockAndValue) {
203  BasicBlock *BB = std::get<0>(BlockAndValue);
204  Value *V = std::get<1>(BlockAndValue);
205  return PtrPHI.getIncomingValueForBlock(BB) != V;
206  }))
207  continue;
208  MatchingPtrPHI = &PtrPHI;
209  break;
210  }
211 
212  if (MatchingPtrPHI) {
213  assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
214  "Phi's Type does not match with IntToPtr");
215  // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
216  // to make sure another transform can't undo it in the meantime.
217  replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
218  eraseInstFromFunction(*IntToPtr);
219  eraseInstFromFunction(PN);
220  return true;
221  }
222 
223  // If it requires a conversion for every PHI operand, do not do it.
224  if (all_of(AvailablePtrVals, [&](Value *V) {
225  return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
226  }))
227  return false;
228 
229  // If any of the operands that require casting is a terminator
230  // instruction, do not do it. Similarly, do not do the transform if the value
231  // is a PHI in a block with no insertion point, for example, a catchswitch
232  // block, since we will not be able to insert a cast after the PHI.
233  if (any_of(AvailablePtrVals, [&](Value *V) {
234  if (V->getType() == IntToPtr->getType())
235  return false;
236  auto *Inst = dyn_cast<Instruction>(V);
237  if (!Inst)
238  return false;
239  if (Inst->isTerminator())
240  return true;
241  auto *BB = Inst->getParent();
242  if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
243  return true;
244  return false;
245  }))
246  return false;
247 
248  PHINode *NewPtrPHI = PHINode::Create(
249  IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");
250 
251  InsertNewInstBefore(NewPtrPHI, PN);
252  SmallDenseMap<Value *, Instruction *> Casts;
253  for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
254  auto *IncomingBB = std::get<0>(Incoming);
255  auto *IncomingVal = std::get<1>(Incoming);
256 
257  if (IncomingVal->getType() == IntToPtr->getType()) {
258  NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
259  continue;
260  }
261 
262 #ifndef NDEBUG
263  LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
264  assert((isa<PHINode>(IncomingVal) ||
265  IncomingVal->getType()->isPointerTy() ||
266  (LoadI && LoadI->hasOneUse())) &&
267  "Can not replace LoadInst with multiple uses");
268 #endif
269  // Need to insert a BitCast.
270  // For an integer Load instruction with a single use, the load + IntToPtr
271  // cast will be simplified into a pointer load:
272  // %v = load i64, i64* %a.ip, align 8
273  // %v.cast = inttoptr i64 %v to float **
274  // ==>
275  // %v.ptrp = bitcast i64 * %a.ip to float **
276  // %v.cast = load float *, float ** %v.ptrp, align 8
277  Instruction *&CI = Casts[IncomingVal];
278  if (!CI) {
279  CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
280  IncomingVal->getName() + ".ptr");
281  if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
282  BasicBlock::iterator InsertPos(IncomingI);
283  InsertPos++;
284  BasicBlock *BB = IncomingI->getParent();
285  if (isa<PHINode>(IncomingI))
286  InsertPos = BB->getFirstInsertionPt();
287  assert(InsertPos != BB->end() && "should have checked above");
288  InsertNewInstBefore(CI, *InsertPos);
289  } else {
290  auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
291  InsertNewInstBefore(CI, *InsertBB->getFirstInsertionPt());
292  }
293  }
294  NewPtrPHI->addIncoming(CI, IncomingBB);
295  }
296 
297  // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
298  // to make sure another transform can't undo it in the meantime.
299  replaceInstUsesWith(*IntToPtr, NewPtrPHI);
300  eraseInstFromFunction(*IntToPtr);
301  eraseInstFromFunction(PN);
302  return true;
303 }
304 
305 // Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
306 // fold Phi-operand to bitcast.
307 Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {
308  // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
309  // Make sure all uses of phi are ptr2int.
310  if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
311  return nullptr;
312 
313  // Iterate over all operands to check for the presence of target pointers
314  // suitable for this optimization.
315  bool OperandWithRoundTripCast = false;
316  for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
317  if (auto *NewOp =
318  simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
319  PN.setIncomingValue(OpNum, NewOp);
320  OperandWithRoundTripCast = true;
321  }
322  }
323  if (!OperandWithRoundTripCast)
324  return nullptr;
325  return &PN;
326 }
327 
328 /// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
329 /// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
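/// A minimal sketch of the rewrite in LLVM IR (value and block names below are
/// hypothetical, not taken from the code in this file):
///   bb1: %iv1 = insertvalue { i32, i64 } %a, i32 %b, 0
///   bb2: %iv2 = insertvalue { i32, i64 } %c, i32 %d, 0
///   bb3: %phi = phi { i32, i64 } [ %iv1, %bb1 ], [ %iv2, %bb2 ]
/// becomes
///   bb3: %agg.pn = phi { i32, i64 } [ %a, %bb1 ], [ %c, %bb2 ]
///        %val.pn = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///        %phi    = insertvalue { i32, i64 } %agg.pn, i32 %val.pn, 0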
330 Instruction *
331 InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
332  auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));
333 
334  // Scan to see if all operands are `insertvalue`'s with the same indices,
335  // and all have a single use.
336  for (Value *V : drop_begin(PN.incoming_values())) {
337  auto *I = dyn_cast<InsertValueInst>(V);
338  if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
339  return nullptr;
340  }
341 
342  // For each operand of an `insertvalue`
343  std::array<PHINode *, 2> NewOperands;
344  for (int OpIdx : {0, 1}) {
345  auto *&NewOperand = NewOperands[OpIdx];
346  // Create a new PHI node to receive the values the operand has in each
347  // incoming basic block.
348  NewOperand = PHINode::Create(
349  FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
350  FirstIVI->getOperand(OpIdx)->getName() + ".pn");
351  // And populate each operand's PHI with said values.
352  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
353  NewOperand->addIncoming(
354  cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
355  std::get<0>(Incoming));
356  InsertNewInstBefore(NewOperand, PN);
357  }
358 
359  // And finally, create `insertvalue` over the newly-formed PHI nodes.
360  auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
361  FirstIVI->getIndices(), PN.getName());
362 
363  PHIArgMergedDebugLoc(NewIVI, PN);
364  ++NumPHIsOfInsertValues;
365  return NewIVI;
366 }
367 
368 /// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
369 /// turn this into a phi[a,b] and a single extractvalue.
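/// A minimal sketch of the rewrite in LLVM IR (value and block names below are
/// hypothetical):
///   bb1: %ev1 = extractvalue { i32, i64 } %a, 0
///   bb2: %ev2 = extractvalue { i32, i64 } %b, 0
///   bb3: %phi = phi i32 [ %ev1, %bb1 ], [ %ev2, %bb2 ]
/// becomes
///   bb3: %agg.pn = phi { i32, i64 } [ %a, %bb1 ], [ %b, %bb2 ]
///        %phi    = extractvalue { i32, i64 } %agg.pn, 0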
370 Instruction *
371 InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
372  auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));
373 
374  // Scan to see if all operands are `extractvalue`'s with the same indices,
375  // and all have a single use.
376  for (Value *V : drop_begin(PN.incoming_values())) {
377  auto *I = dyn_cast<ExtractValueInst>(V);
378  if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
379  I->getAggregateOperand()->getType() !=
380  FirstEVI->getAggregateOperand()->getType())
381  return nullptr;
382  }
383 
384  // Create a new PHI node to receive the values the aggregate operand has
385  // in each incoming basic block.
386  auto *NewAggregateOperand = PHINode::Create(
387  FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
388  FirstEVI->getAggregateOperand()->getName() + ".pn");
389  // And populate the PHI with said values.
390  for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
391  NewAggregateOperand->addIncoming(
392  cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
393  std::get<0>(Incoming));
394  InsertNewInstBefore(NewAggregateOperand, PN);
395 
396  // And finally, create `extractvalue` over the newly-formed PHI nodes.
397  auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
398  FirstEVI->getIndices(), PN.getName());
399 
400  PHIArgMergedDebugLoc(NewEVI, PN);
401  ++NumPHIsOfExtractValues;
402  return NewEVI;
403 }
404 
405 /// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
406 /// adds all have a single user, turn this into a phi and a single binop.
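/// A minimal sketch of the rewrite in LLVM IR (value and block names below are
/// hypothetical); note that at most one of the two operands may vary across
/// the incoming values:
///   bb1: %add1 = add i32 %a, %b
///   bb2: %add2 = add i32 %a, %c
///   bb3: %phi  = phi i32 [ %add1, %bb1 ], [ %add2, %bb2 ]
/// becomes
///   bb3: %rhs.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///        %phi    = add i32 %a, %rhs.pn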
407 Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {
408  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
409  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
410  unsigned Opc = FirstInst->getOpcode();
411  Value *LHSVal = FirstInst->getOperand(0);
412  Value *RHSVal = FirstInst->getOperand(1);
413 
414  Type *LHSType = LHSVal->getType();
415  Type *RHSType = RHSVal->getType();
416 
417  // Scan to see if all operands are the same opcode, and all have one user.
418  for (Value *V : drop_begin(PN.incoming_values())) {
419  Instruction *I = dyn_cast<Instruction>(V);
420  if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
421  // Verify type of the LHS matches so we don't fold cmp's of different
422  // types.
423  I->getOperand(0)->getType() != LHSType ||
424  I->getOperand(1)->getType() != RHSType)
425  return nullptr;
426 
427  // If they are CmpInst instructions, check their predicates
428  if (CmpInst *CI = dyn_cast<CmpInst>(I))
429  if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
430  return nullptr;
431 
432  // Keep track of which operand needs a phi node.
433  if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
434  if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
435  }
436 
437  // If both LHS and RHS would need a PHI, don't do this transformation,
438  // because it would increase the number of PHIs entering the block,
439  // which leads to higher register pressure. This is especially
440  // bad when the PHIs are in the header of a loop.
441  if (!LHSVal && !RHSVal)
442  return nullptr;
443 
444  // Otherwise, this is safe to transform!
445 
446  Value *InLHS = FirstInst->getOperand(0);
447  Value *InRHS = FirstInst->getOperand(1);
448  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
449  if (!LHSVal) {
450  NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
451  FirstInst->getOperand(0)->getName() + ".pn");
452  NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
453  InsertNewInstBefore(NewLHS, PN);
454  LHSVal = NewLHS;
455  }
456 
457  if (!RHSVal) {
458  NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
459  FirstInst->getOperand(1)->getName() + ".pn");
460  NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
461  InsertNewInstBefore(NewRHS, PN);
462  RHSVal = NewRHS;
463  }
464 
465  // Add all operands to the new PHIs.
466  if (NewLHS || NewRHS) {
467  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
468  BasicBlock *InBB = std::get<0>(Incoming);
469  Value *InVal = std::get<1>(Incoming);
470  Instruction *InInst = cast<Instruction>(InVal);
471  if (NewLHS) {
472  Value *NewInLHS = InInst->getOperand(0);
473  NewLHS->addIncoming(NewInLHS, InBB);
474  }
475  if (NewRHS) {
476  Value *NewInRHS = InInst->getOperand(1);
477  NewRHS->addIncoming(NewInRHS, InBB);
478  }
479  }
480  }
481 
482  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
483  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
484  LHSVal, RHSVal);
485  PHIArgMergedDebugLoc(NewCI, PN);
486  return NewCI;
487  }
488 
489  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
490  BinaryOperator *NewBinOp =
491  BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
492 
493  NewBinOp->copyIRFlags(PN.getIncomingValue(0));
494 
495  for (Value *V : drop_begin(PN.incoming_values()))
496  NewBinOp->andIRFlags(V);
497 
498  PHIArgMergedDebugLoc(NewBinOp, PN);
499  return NewBinOp;
500 }
501 
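/// If all incoming values of the PHI are GEPs with the same source element
/// type and operand count that differ in at most one operand, fold them into a
/// PHI of the differing operand feeding a single GEP. A minimal sketch in LLVM
/// IR (names below are hypothetical):
///   bb1: %g1 = getelementptr inbounds i32, ptr %base, i64 %i
///   bb2: %g2 = getelementptr inbounds i32, ptr %base, i64 %j
///   bb3: %p  = phi ptr [ %g1, %bb1 ], [ %g2, %bb2 ]
/// becomes
///   bb3: %idx.pn = phi i64 [ %i, %bb1 ], [ %j, %bb2 ]
///        %p      = getelementptr inbounds i32, ptr %base, i64 %idx.pn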
502 Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {
503  GetElementPtrInst *FirstInst = cast<GetElementPtrInst>(PN.getIncomingValue(0));
504 
505  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
506  FirstInst->op_end());
507  // This is true if all GEP bases are allocas and if all indices into them are
508  // constants.
509  bool AllBasePointersAreAllocas = true;
510 
511  // We don't want to replace this phi if the replacement would require
512  // more than one phi, which leads to higher register pressure. This is
513  // especially bad when the PHIs are in the header of a loop.
514  bool NeededPhi = false;
515 
516  bool AllInBounds = true;
517 
518  // Scan to see if all operands are the same opcode, and all have one user.
519  for (Value *V : drop_begin(PN.incoming_values())) {
520  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
521  if (!GEP || !GEP->hasOneUser() ||
522  GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
523  GEP->getNumOperands() != FirstInst->getNumOperands())
524  return nullptr;
525 
526  AllInBounds &= GEP->isInBounds();
527 
528  // Keep track of whether or not all GEPs are of alloca pointers.
529  if (AllBasePointersAreAllocas &&
530  (!isa<AllocaInst>(GEP->getOperand(0)) ||
531  !GEP->hasAllConstantIndices()))
532  AllBasePointersAreAllocas = false;
533 
534  // Compare the operand lists.
535  for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
536  if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
537  continue;
538 
539  // Don't merge two GEPs when two operands differ (introducing phi nodes)
540  // if one of the PHIs has a constant for the index. The index may be
541  // substantially cheaper to compute for the constants, so making it a
542  // variable index could pessimize the path. This also handles the case
543  // for struct indices, which must always be constant.
544  if (isa<ConstantInt>(FirstInst->getOperand(Op)) ||
545  isa<ConstantInt>(GEP->getOperand(Op)))
546  return nullptr;
547 
548  if (FirstInst->getOperand(Op)->getType() !=
549  GEP->getOperand(Op)->getType())
550  return nullptr;
551 
552  // If we already needed a PHI for an earlier operand, and another operand
553  // also requires a PHI, we'd be introducing more PHIs than we're
554  // eliminating, which increases register pressure on entry to the PHI's
555  // block.
556  if (NeededPhi)
557  return nullptr;
558 
559  FixedOperands[Op] = nullptr; // Needs a PHI.
560  NeededPhi = true;
561  }
562  }
563 
564  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
565  // bother doing this transformation. At best, this will just save a bit of
566  // offset calculation, but all the predecessors will have to materialize the
567  // stack address into a register anyway. We'd actually rather *clone* the
568  // load up into the predecessors so that we have a load of a gep of an alloca,
569  // which can usually all be folded into the load.
570  if (AllBasePointersAreAllocas)
571  return nullptr;
572 
573  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
574  // that is variable.
575  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
576 
577  bool HasAnyPHIs = false;
578  for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
579  if (FixedOperands[I])
580  continue; // operand doesn't need a phi.
581  Value *FirstOp = FirstInst->getOperand(I);
582  PHINode *NewPN =
583  PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
584  InsertNewInstBefore(NewPN, PN);
585 
586  NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
587  OperandPhis[I] = NewPN;
588  FixedOperands[I] = NewPN;
589  HasAnyPHIs = true;
590  }
591 
592  // Add all operands to the new PHIs.
593  if (HasAnyPHIs) {
594  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
595  BasicBlock *InBB = std::get<0>(Incoming);
596  Value *InVal = std::get<1>(Incoming);
597  GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);
598 
599  for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
600  if (PHINode *OpPhi = OperandPhis[Op])
601  OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
602  }
603  }
604 
605  Value *Base = FixedOperands[0];
606  GetElementPtrInst *NewGEP =
607  GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
608  makeArrayRef(FixedOperands).slice(1));
609  if (AllInBounds) NewGEP->setIsInBounds();
610  PHIArgMergedDebugLoc(NewGEP, PN);
611  return NewGEP;
612 }
613 
614 /// Return true if we know that it is safe to sink the load out of the block
615 /// that defines it. This means that it must be obvious the value of the load is
616 /// not changed from the point of the load to the end of the block it is in.
617 ///
618 /// Finally, it is safe, but not profitable, to sink a load targeting a
619 /// non-address-taken alloca. Doing so will cause us to not promote the alloca
620 /// to a register.
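/// For example (illustrative IR; names below are hypothetical), the load here
/// cannot be sunk out of its block, because the store that follows it may
/// modify the loaded memory:
///   %v = load i32, ptr %p
///   store i32 0, ptr %q
///   br label %merge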
621 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
622  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
623 
624  for (++BBI; BBI != E; ++BBI)
625  if (BBI->mayWriteToMemory()) {
626  // Calls that only access inaccessible memory do not block sinking the
627  // load.
628  if (auto *CB = dyn_cast<CallBase>(BBI))
629  if (CB->onlyAccessesInaccessibleMemory())
630  continue;
631  return false;
632  }
633 
634  // Check for a non-address-taken alloca. If the alloca is not address-taken,
635  // it isn't profitable to do this transform.
636  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
637  bool IsAddressTaken = false;
638  for (User *U : AI->users()) {
639  if (isa<LoadInst>(U)) continue;
640  if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
641  // If storing TO the alloca, then the address isn't taken.
642  if (SI->getOperand(1) == AI) continue;
643  }
644  IsAddressTaken = true;
645  break;
646  }
647 
648  if (!IsAddressTaken && AI->isStaticAlloca())
649  return false;
650  }
651 
652  // If this load is a load from a GEP with a constant offset from an alloca,
653  // then we don't want to sink it. In its present form, it will be
654  // load [constant stack offset]. Sinking it will cause us to have to
655  // materialize the stack addresses in each predecessor in a register only to
656  // do a shared load from register in the successor.
657  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
658  if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
659  if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
660  return false;
661 
662  return true;
663 }
664 
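/// If all incoming values of the PHI are loads with compatible properties
/// (same volatility and address space, and safe to sink), PHI the loaded
/// pointers together and do a single load. A minimal sketch in LLVM IR (names
/// below are hypothetical):
///   bb1: %v1 = load i32, ptr %p1
///   bb2: %v2 = load i32, ptr %p2
///   bb3: %v  = phi i32 [ %v1, %bb1 ], [ %v2, %bb2 ]
/// becomes
///   bb3: %v.in = phi ptr [ %p1, %bb1 ], [ %p2, %bb2 ]
///        %v    = load i32, ptr %v.in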
665 Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {
666  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
667 
668  // Can't forward swifterror through a phi.
669  if (FirstLI->getOperand(0)->isSwiftError())
670  return nullptr;
671 
672  // FIXME: This is overconservative; this transform is allowed in some cases
673  // for atomic operations.
674  if (FirstLI->isAtomic())
675  return nullptr;
676 
677  // When processing loads, we need to propagate two bits of information to the
678  // sunk load: whether it is volatile, and what its alignment is.
679  bool IsVolatile = FirstLI->isVolatile();
680  Align LoadAlignment = FirstLI->getAlign();
681  const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
682 
683  // We can't sink the load if the loaded value could be modified between the
684  // load and the PHI.
685  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
686  !isSafeAndProfitableToSinkLoad(FirstLI))
687  return nullptr;
688 
689  // If the PHI is of volatile loads and the load block has multiple
690  // successors, sinking it would remove a load of the volatile value from
691  // the path through the other successor.
692  if (IsVolatile &&
693  FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
694  return nullptr;
695 
696  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
697  BasicBlock *InBB = std::get<0>(Incoming);
698  Value *InVal = std::get<1>(Incoming);
699  LoadInst *LI = dyn_cast<LoadInst>(InVal);
700  if (!LI || !LI->hasOneUser() || LI->isAtomic())
701  return nullptr;
702 
703  // Make sure all arguments are the same type of operation.
704  if (LI->isVolatile() != IsVolatile ||
705  LI->getPointerAddressSpace() != LoadAddrSpace)
706  return nullptr;
707 
708  // Can't forward swifterror through a phi.
709  if (LI->getOperand(0)->isSwiftError())
710  return nullptr;
711 
712  // We can't sink the load if the loaded value could be modified between
713  // the load and the PHI.
714  if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
715  return nullptr;
716 
717  LoadAlignment = std::min(LoadAlignment, LI->getAlign());
718 
719  // If the PHI is of volatile loads and the load block has multiple
720  // successors, sinking it would remove a load of the volatile value from
721  // the path through the other successor.
722  if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
723  return nullptr;
724  }
725 
726  // Okay, they are all the same operation. Create a new PHI node of the
727  // correct type, and PHI together all of the LHS's of the instructions.
728  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
729  PN.getNumIncomingValues(),
730  PN.getName()+".in");
731 
732  Value *InVal = FirstLI->getOperand(0);
733  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
734  LoadInst *NewLI =
735  new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
736 
737  unsigned KnownIDs[] = {
738  LLVMContext::MD_tbaa,
739  LLVMContext::MD_range,
740  LLVMContext::MD_invariant_load,
741  LLVMContext::MD_alias_scope,
742  LLVMContext::MD_noalias,
743  LLVMContext::MD_nonnull,
744  LLVMContext::MD_align,
745  LLVMContext::MD_dereferenceable,
746  LLVMContext::MD_dereferenceable_or_null,
747  LLVMContext::MD_access_group,
748  };
749 
750  for (unsigned ID : KnownIDs)
751  NewLI->setMetadata(ID, FirstLI->getMetadata(ID));
752 
753  // Add all operands to the new PHI and combine TBAA metadata.
754  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
755  BasicBlock *BB = std::get<0>(Incoming);
756  Value *V = std::get<1>(Incoming);
757  LoadInst *LI = cast<LoadInst>(V);
758  combineMetadata(NewLI, LI, KnownIDs, true);
759  Value *NewInVal = LI->getOperand(0);
760  if (NewInVal != InVal)
761  InVal = nullptr;
762  NewPN->addIncoming(NewInVal, BB);
763  }
764 
765  if (InVal) {
766  // The new PHI unions all of the same values together. This is really
767  // common, so we handle it intelligently here for compile-time speed.
768  NewLI->setOperand(0, InVal);
769  delete NewPN;
770  } else {
771  InsertNewInstBefore(NewPN, PN);
772  }
773 
774  // If this was a volatile load that we are merging, make sure to loop through
775  // and mark all the input loads as non-volatile. If we don't do this, we will
776  // insert a new volatile load and the old ones will not be deletable.
777  if (IsVolatile)
778  for (Value *IncValue : PN.incoming_values())
779  cast<LoadInst>(IncValue)->setVolatile(false);
780 
781  PHIArgMergedDebugLoc(NewLI, PN);
782  return NewLI;
783 }
784 
785 /// TODO: This function could handle other cast types, but then it might
786 /// require special-casing a cast from the 'i1' type. See the comment in
787 /// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
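/// A minimal sketch of the rewrite in LLVM IR (names below are hypothetical);
/// all incoming values must be identically-typed zexts or constants that
/// truncate losslessly:
///   bb1: %az = zext i8 %a to i32
///   bb2: %bz = zext i8 %b to i32
///   bb4: %p  = phi i32 [ %az, %bb1 ], [ %bz, %bb2 ], [ 7, %bb3 ]
/// becomes
///   bb4: %p.shrunk = phi i8 [ %a, %bb1 ], [ %b, %bb2 ], [ 7, %bb3 ]
///        %p        = zext i8 %p.shrunk to i32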
788 Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {
789  // We cannot create a new instruction after the PHI if the terminator is an
790  // EHPad because there is no valid insertion point.
791  if (Instruction *TI = Phi.getParent()->getTerminator())
792  if (TI->isEHPad())
793  return nullptr;
794 
795  // Early exit for the common case of a phi with two operands. These are
796  // handled elsewhere. See the comment below where we check the count of zexts
797  // and constants for more details.
798  unsigned NumIncomingValues = Phi.getNumIncomingValues();
799  if (NumIncomingValues < 3)
800  return nullptr;
801 
802  // Find the narrower type specified by the first zext.
803  Type *NarrowType = nullptr;
804  for (Value *V : Phi.incoming_values()) {
805  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
806  NarrowType = Zext->getSrcTy();
807  break;
808  }
809  }
810  if (!NarrowType)
811  return nullptr;
812 
813  // Walk the phi operands checking that we only have zexts or constants that
814  // we can shrink for free. Store the new operands for the new phi.
815  SmallVector<Value *, 4> NewIncoming;
816  unsigned NumZexts = 0;
817  unsigned NumConsts = 0;
818  for (Value *V : Phi.incoming_values()) {
819  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
820  // All zexts must be identical and have one user.
821  if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
822  return nullptr;
823  NewIncoming.push_back(Zext->getOperand(0));
824  NumZexts++;
825  } else if (auto *C = dyn_cast<Constant>(V)) {
826  // Make sure that constants can fit in the new type.
827  Constant *Trunc = ConstantExpr::getTrunc(C, NarrowType);
828  if (ConstantExpr::getZExt(Trunc, C->getType()) != C)
829  return nullptr;
830  NewIncoming.push_back(Trunc);
831  NumConsts++;
832  } else {
833  // If it's not a cast or a constant, bail out.
834  return nullptr;
835  }
836  }
837 
838  // The more common cases of a phi with no constant operands or just one
839  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
840  // respectively. foldOpIntoPhi() wants to do the opposite of the transform
841  // performed here: it tries to replicate a cast in the phi operand's basic
842  // block to expose other folding opportunities. Thus, InstCombine will
843  // infinite loop without this check.
844  if (NumConsts == 0 || NumZexts < 2)
845  return nullptr;
846 
847  // All incoming values are zexts or constants that are safe to truncate.
848  // Create a new phi node of the narrow type, phi together all of the new
849  // operands, and zext the result back to the original type.
850  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
851  Phi.getName() + ".shrunk");
852  for (unsigned I = 0; I != NumIncomingValues; ++I)
853  NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));
854 
855  InsertNewInstBefore(NewPhi, Phi);
856  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
857 }
858 
859 /// If all operands to a PHI node are the same "unary" operator and they all are
860 /// only used by the PHI, PHI together their inputs, and do the operation once,
861 /// to the result of the PHI.
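/// A minimal sketch of the rewrite in LLVM IR for the cast case (names below
/// are hypothetical):
///   bb1: %t1 = trunc i64 %a to i32
///   bb2: %t2 = trunc i64 %b to i32
///   bb3: %p  = phi i32 [ %t1, %bb1 ], [ %t2, %bb2 ]
/// becomes
///   bb3: %p.in = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///        %p    = trunc i64 %p.in to i32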
862 Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
863  // We cannot create a new instruction after the PHI if the terminator is an
864  // EHPad because there is no valid insertion point.
865  if (Instruction *TI = PN.getParent()->getTerminator())
866  if (TI->isEHPad())
867  return nullptr;
868 
869  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
870 
871  if (isa<GetElementPtrInst>(FirstInst))
872  return foldPHIArgGEPIntoPHI(PN);
873  if (isa<LoadInst>(FirstInst))
874  return foldPHIArgLoadIntoPHI(PN);
875  if (isa<InsertValueInst>(FirstInst))
876  return foldPHIArgInsertValueInstructionIntoPHI(PN);
877  if (isa<ExtractValueInst>(FirstInst))
878  return foldPHIArgExtractValueInstructionIntoPHI(PN);
879 
880  // Scan the instruction, looking for input operations that can be folded away.
881  // If all input operands to the phi are the same instruction (e.g. a cast from
882  // the same type or "+42") we can pull the operation through the PHI, reducing
883  // code size and simplifying code.
884  Constant *ConstantOp = nullptr;
885  Type *CastSrcTy = nullptr;
886 
887  if (isa<CastInst>(FirstInst)) {
888  CastSrcTy = FirstInst->getOperand(0)->getType();
889 
890  // Be careful about transforming integer PHIs. We don't want to pessimize
891  // the code by turning an i32 into an i1293.
892  if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
893  if (!shouldChangeType(PN.getType(), CastSrcTy))
894  return nullptr;
895  }
896  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
897  // Can fold binop, compare or shift here if the RHS is a constant,
898  // otherwise call FoldPHIArgBinOpIntoPHI.
899  ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
900  if (!ConstantOp)
901  return foldPHIArgBinOpIntoPHI(PN);
902  } else {
903  return nullptr; // Cannot fold this operation.
904  }
905 
906  // Check to see if all arguments are the same operation.
907  for (Value *V : drop_begin(PN.incoming_values())) {
908  Instruction *I = dyn_cast<Instruction>(V);
909  if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
910  return nullptr;
911  if (CastSrcTy) {
912  if (I->getOperand(0)->getType() != CastSrcTy)
913  return nullptr; // Cast operation must match.
914  } else if (I->getOperand(1) != ConstantOp) {
915  return nullptr;
916  }
917  }
918 
919  // Okay, they are all the same operation. Create a new PHI node of the
920  // correct type, and PHI together all of the LHS's of the instructions.
921  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
922  PN.getNumIncomingValues(),
923  PN.getName()+".in");
924 
925  Value *InVal = FirstInst->getOperand(0);
926  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
927 
928  // Add all operands to the new PHI.
929  for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
930  BasicBlock *BB = std::get<0>(Incoming);
931  Value *V = std::get<1>(Incoming);
932  Value *NewInVal = cast<Instruction>(V)->getOperand(0);
933  if (NewInVal != InVal)
934  InVal = nullptr;
935  NewPN->addIncoming(NewInVal, BB);
936  }
937 
938  Value *PhiVal;
939  if (InVal) {
940  // The new PHI unions all of the same values together. This is really
941  // common, so we handle it intelligently here for compile-time speed.
942  PhiVal = InVal;
943  delete NewPN;
944  } else {
945  InsertNewInstBefore(NewPN, PN);
946  PhiVal = NewPN;
947  }
948 
949  // Insert and return the new operation.
950  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
951  CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
952  PN.getType());
953  PHIArgMergedDebugLoc(NewCI, PN);
954  return NewCI;
955  }
956 
957  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
958  BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
959  BinOp->copyIRFlags(PN.getIncomingValue(0));
960 
961  for (Value *V : drop_begin(PN.incoming_values()))
962  BinOp->andIRFlags(V);
963 
964  PHIArgMergedDebugLoc(BinOp, PN);
965  return BinOp;
966  }
967 
968  CmpInst *CIOp = cast<CmpInst>(FirstInst);
969  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
970  PhiVal, ConstantOp);
971  PHIArgMergedDebugLoc(NewCI, PN);
972  return NewCI;
973 }
974 
975 /// Return true if this PHI node is only used by a PHI node cycle that is dead.
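/// For example (illustrative IR; names below are hypothetical), %x and %y form
/// a dead cycle if each is the only user of the other:
///   %x = phi i32 [ %y, %bb1 ], [ 0, %bb0 ]
///   %y = phi i32 [ %x, %bb2 ], [ 1, %bb3 ]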
976 static bool isDeadPHICycle(PHINode *PN,
977  SmallPtrSetImpl<PHINode *> &PotentiallyDeadPHIs) {
978  if (PN->use_empty()) return true;
979  if (!PN->hasOneUse()) return false;
980 
981  // Remember this node, and if we find the cycle, return.
982  if (!PotentiallyDeadPHIs.insert(PN).second)
983  return true;
984 
985  // Don't scan crazily complex things.
986  if (PotentiallyDeadPHIs.size() == 16)
987  return false;
988 
989  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
990  return isDeadPHICycle(PU, PotentiallyDeadPHIs);
991 
992  return false;
993 }
994 
995 /// Return true if this phi node is always equal to NonPhiInVal.
996 /// This happens with mutually cyclic phi nodes like:
997 /// z = some value; x = phi (y, z); y = phi (x, z)
998 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
999  SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
1000  // See if we already saw this PHI node.
1001  if (!ValueEqualPHIs.insert(PN).second)
1002  return true;
1003 
1004  // Don't scan crazily complex things.
1005  if (ValueEqualPHIs.size() == 16)
1006  return false;
1007 
1008  // Scan the operands to see if they are either phi nodes or are equal to
1009  // the value.
1010  for (Value *Op : PN->incoming_values()) {
1011  if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
1012  if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
1013  return false;
1014  } else if (Op != NonPhiInVal)
1015  return false;
1016  }
1017 
1018  return true;
1019 }
1020 
1021 /// Return an existing non-zero constant if this phi node has one, otherwise
1022 /// return constant 1.
1023 static ConstantInt *getAnyNonZeroConstInt(PHINode &PN) {
1024  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
1025  for (Value *V : PN.operands())
1026  if (auto *ConstVA = dyn_cast<ConstantInt>(V))
1027  if (!ConstVA->isZero())
1028  return ConstVA;
1029  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
1030 }
1031 
1032 namespace {
1033 struct PHIUsageRecord {
1034  unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
1035  unsigned Shift; // The amount shifted.
1036  Instruction *Inst; // The trunc instruction.
1037 
1038  PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
1039  : PHIId(Pn), Shift(Sh), Inst(User) {}
1040 
1041  bool operator<(const PHIUsageRecord &RHS) const {
1042  if (PHIId < RHS.PHIId) return true;
1043  if (PHIId > RHS.PHIId) return false;
1044  if (Shift < RHS.Shift) return true;
1045  if (Shift > RHS.Shift) return false;
1046  return Inst->getType()->getPrimitiveSizeInBits() <
1047  RHS.Inst->getType()->getPrimitiveSizeInBits();
1048  }
1049 };
1050 
1051 struct LoweredPHIRecord {
1052  PHINode *PN; // The PHI that was lowered.
1053  unsigned Shift; // The amount shifted.
1054  unsigned Width; // The width extracted.
1055 
1056  LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
1057  : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
1058 
1059  // Ctor form used by DenseMap.
1060  LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
1061 };
1062 } // namespace
1063 
1064 namespace llvm {
1065  template<>
1066  struct DenseMapInfo<LoweredPHIRecord> {
1067  static inline LoweredPHIRecord getEmptyKey() {
1068  return LoweredPHIRecord(nullptr, 0);
1069  }
1070  static inline LoweredPHIRecord getTombstoneKey() {
1071  return LoweredPHIRecord(nullptr, 1);
1072  }
1073  static unsigned getHashValue(const LoweredPHIRecord &Val) {
1074  return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
1075  (Val.Width>>3);
1076  }
1077  static bool isEqual(const LoweredPHIRecord &LHS,
1078  const LoweredPHIRecord &RHS) {
1079  return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
1080  LHS.Width == RHS.Width;
1081  }
1082  };
1083 } // namespace llvm
1084 
1085 
1086 /// This is an integer PHI and we know that it has an illegal type: see if it is
1087 /// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
1088 /// the various pieces being extracted. This sort of thing is introduced when
1089 /// SROA promotes an aggregate to large integer values.
1090 ///
1091 /// TODO: The user of the trunc may be a bitcast to float/double/vector or an
1092 /// inttoptr. We should produce new PHIs in the right type.
1093 ///
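/// A minimal sketch of the rewrite in LLVM IR (names below are hypothetical):
/// an illegally-typed wide PHI (e.g. i64 on a 32-bit target) used only as two
/// extracted halves
///   %p        = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %lo       = trunc i64 %p to i32
///   %hi.shift = lshr i64 %p, 32
///   %hi       = trunc i64 %hi.shift to i32
/// is split into two i32 PHIs whose inputs are extracted (lshr + trunc) in the
/// predecessor blocks.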
1094 Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
1095  // PHIUsers - Keep track of all of the truncated values extracted from a set
1096  // of PHIs, along with their offset. These are the things we want to rewrite.
1097  SmallVector<PHIUsageRecord, 16> PHIUsers;
1098 
1099  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
1100  // nodes which are extracted from. PHIsToSlice is an ordered worklist of PHIs
1101  // whose uses we still need to check (to ensure they are all extracts), and
1102  // PHIsInspected is a set we use to avoid revisiting PHIs.
1103  SmallVector<PHINode*, 8> PHIsToSlice;
1104  SmallPtrSet<PHINode*, 8> PHIsInspected;
1105 
1106  PHIsToSlice.push_back(&FirstPhi);
1107  PHIsInspected.insert(&FirstPhi);
1108 
1109  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
1110  PHINode *PN = PHIsToSlice[PHIId];
1111 
1112  // Scan the input list of the PHI. If any input is an invoke, and if the
1113  // input is defined in the predecessor, then we won't be able to split the
1114  // critical edge which is required to insert a truncate. Because of this, we
1115  // have to bail out.
1116  for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1117  BasicBlock *BB = std::get<0>(Incoming);
1118  Value *V = std::get<1>(Incoming);
1119  InvokeInst *II = dyn_cast<InvokeInst>(V);
1120  if (!II)
1121  continue;
1122  if (II->getParent() != BB)
1123  continue;
1124 
1125  // If we have a phi, and if it's directly in the predecessor, then we have
1126  // a critical edge where we need to put the truncate. Since we can't
1127  // split the edge in instcombine, we have to bail out.
1128  return nullptr;
1129  }
1130 
1131  // If the incoming value is a PHI node before a catchswitch, we cannot
1132  // extract the value within that BB because we cannot insert any non-PHI
1133  // instructions in the BB.
1134  for (auto *Pred : PN->blocks())
1135  if (Pred->getFirstInsertionPt() == Pred->end())
1136  return nullptr;
1137 
1138  for (User *U : PN->users()) {
1139  Instruction *UserI = cast<Instruction>(U);
1140 
1141  // If the user is a PHI, inspect its uses recursively.
1142  if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
1143  if (PHIsInspected.insert(UserPN).second)
1144  PHIsToSlice.push_back(UserPN);
1145  continue;
1146  }
1147 
1148  // Truncates are always ok.
1149  if (isa<TruncInst>(UserI)) {
1150  PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
1151  continue;
1152  }
1153 
1154  // Otherwise it must be a lshr which can only be used by one trunc.
1155  if (UserI->getOpcode() != Instruction::LShr ||
1156  !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
1157  !isa<ConstantInt>(UserI->getOperand(1)))
1158  return nullptr;
1159 
1160  // Bail on out of range shifts.
1161  unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
1162  if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
1163  return nullptr;
1164 
1165  unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
1166  PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
1167  }
1168  }
1169 
1170  // If we have no users, they must all be self uses; just nuke the PHI.
1171  if (PHIUsers.empty())
1172  return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));
1173 
1174  // If this phi node is transformable, create new PHIs for all the pieces
1175  // extracted out of it. First, sort the users by their offset and size.
1176  array_pod_sort(PHIUsers.begin(), PHIUsers.end());
1177 
1178  LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
1179  for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
1180  << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');
1181 
1182  // PredValues - This is a temporary used when rewriting PHI nodes. It is
1183  // hoisted out here to avoid construction/destruction thrashing.
1184  DenseMap<BasicBlock*, Value*> PredValues;
1185 
1186  // ExtractedVals - Each new PHI we introduce is saved here so we don't
1187  // introduce redundant PHIs.
1188  DenseMap<LoweredPHIRecord, PHINode *> ExtractedVals;
1189 
1190  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
1191  unsigned PHIId = PHIUsers[UserI].PHIId;
1192  PHINode *PN = PHIsToSlice[PHIId];
1193  unsigned Offset = PHIUsers[UserI].Shift;
1194  Type *Ty = PHIUsers[UserI].Inst->getType();
1195 
1196  PHINode *EltPHI;
1197 
1198  // If we've already lowered a user like this, reuse the previously lowered
1199  // value.
1200  if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
1201 
1202  // Otherwise, Create the new PHI node for this user.
1203  EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
1204  PN->getName()+".off"+Twine(Offset), PN);
1205  assert(EltPHI->getType() != PN->getType() &&
1206  "Truncate didn't shrink phi?");
1207 
1208  for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1209  BasicBlock *Pred = std::get<0>(Incoming);
1210  Value *InVal = std::get<1>(Incoming);
1211  Value *&PredVal = PredValues[Pred];
1212 
1213  // If we already have a value for this predecessor, reuse it.
1214  if (PredVal) {
1215  EltPHI->addIncoming(PredVal, Pred);
1216  continue;
1217  }
1218 
1219  // Handle the PHI self-reuse case.
1220  if (InVal == PN) {
1221  PredVal = EltPHI;
1222  EltPHI->addIncoming(PredVal, Pred);
1223  continue;
1224  }
1225 
1226  if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
1227  // If the incoming value was a PHI, and if it was one of the PHIs we
1228  // already rewrote, just use the lowered value.
1229  if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
1230  PredVal = Res;
1231  EltPHI->addIncoming(PredVal, Pred);
1232  continue;
1233  }
1234  }
1235 
1236  // Otherwise, do an extract in the predecessor.
1237  Builder.SetInsertPoint(Pred->getTerminator());
1238  Value *Res = InVal;
1239  if (Offset)
1240  Res = Builder.CreateLShr(
1241  Res, ConstantInt::get(InVal->getType(), Offset), "extract");
1242  Res = Builder.CreateTrunc(Res, Ty, "extract.t");
1243  PredVal = Res;
1244  EltPHI->addIncoming(Res, Pred);
1245 
1246  // If the incoming value was a PHI, and if it was one of the PHIs we are
1247  // rewriting, we will ultimately delete the code we inserted. This
1248  // means we need to revisit that PHI to make sure we extract out the
1249  // needed piece.
1250  if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
1251  if (PHIsInspected.count(OldInVal)) {
1252  unsigned RefPHIId =
1253  find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
1254  PHIUsers.push_back(
1255  PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
1256  ++UserE;
1257  }
1258  }
1259  PredValues.clear();
1260 
1261  LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
1262  << *EltPHI << '\n');
1263  ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
1264  }
1265 
1266  // Replace the use of this piece with the PHI node.
1267  replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
1268  }
1269 
1270  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
1271  // with poison.
1272  Value *Poison = PoisonValue::get(FirstPhi.getType());
1273  for (PHINode *PHI : drop_begin(PHIsToSlice))
1274  replaceInstUsesWith(*PHI, Poison);
1275  return replaceInstUsesWith(FirstPhi, Poison);
1276 }
1277 
1278 static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
1279  const DominatorTree &DT) {
1280  // Simplify the following patterns:
1281  // if (cond)
1282  // / \
1283  // ... ...
1284  // \ /
1285  // phi [true] [false]
1286  // and
1287  // switch (cond)
1288  // case v1: / \ case v2:
1289  // ... ...
1290  // \ /
1291  // phi [v1] [v2]
1292  // Make sure all inputs are constants.
1293  if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
1294  return nullptr;
1295 
1296  BasicBlock *BB = PN.getParent();
1297  // Do not bother with unreachable instructions.
1298  if (!DT.isReachableFromEntry(BB))
1299  return nullptr;
1300 
1301  // Determine which value the condition of the idom has for which successor.
1302  LLVMContext &Context = PN.getContext();
1303  auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
1304  Value *Cond;
1305  SmallDenseMap<ConstantInt *, BasicBlock *, 8> SuccForValue;
1306  SmallDenseMap<BasicBlock *, unsigned, 8> SuccCount;
1307  auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
1308  SuccForValue[C] = Succ;
1309  ++SuccCount[Succ];
1310  };
1311  if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
1312  if (BI->isUnconditional())
1313  return nullptr;
1314 
1315  Cond = BI->getCondition();
1316  AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
1317  AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
1318  } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
1319  Cond = SI->getCondition();
1320  ++SuccCount[SI->getDefaultDest()];
1321  for (auto Case : SI->cases())
1322  AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
1323  } else {
1324  return nullptr;
1325  }
1326 
1327  if (Cond->getType() != PN.getType())
1328  return nullptr;
1329 
1330  // Check that edges outgoing from the idom's terminators dominate respective
1331  // inputs of the Phi.
1332  std::optional<bool> Invert;
1333  for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
1334  auto *Input = cast<ConstantInt>(std::get<0>(Pair));
1335  BasicBlock *Pred = std::get<1>(Pair);
1336  auto IsCorrectInput = [&](ConstantInt *Input) {
1337  // The input needs to be dominated by the corresponding edge of the idom.
1338  // This edge cannot be a multi-edge, as that would imply that multiple
1339  // different condition values follow the same edge.
1340  auto It = SuccForValue.find(Input);
1341  return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
1342  DT.dominates(BasicBlockEdge(IDom, It->second),
1343  BasicBlockEdge(Pred, BB));
1344  };
1345 
1346  // Depending on the constant, the condition may need to be inverted.
1347  bool NeedsInvert;
1348  if (IsCorrectInput(Input))
1349  NeedsInvert = false;
1350  else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
1351  NeedsInvert = true;
1352  else
1353  return nullptr;
1354 
1355  // Make sure the inversion requirement is always the same.
1356  if (Invert && *Invert != NeedsInvert)
1357  return nullptr;
1358 
1359  Invert = NeedsInvert;
1360  }
1361 
1362  if (!*Invert)
1363  return Cond;
1364 
1365  // This Phi is actually the opposite of the IDom's branching condition.
1366  // Inverting the condition will potentially open up some opportunities for
1367  // sinking.
1368  auto InsertPt = BB->getFirstInsertionPt();
1369  if (InsertPt != BB->end()) {
1370  Self.Builder.SetInsertPoint(&*InsertPt);
1371  return Self.Builder.CreateNot(Cond);
1372  }
1373 
1374  return nullptr;
1375 }
1376 
1377 // PHINode simplification
1378 //
1379 Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
1380  if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
1381  return replaceInstUsesWith(PN, V);
1382 
1383  if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
1384  return Result;
1385 
1386  if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
1387  return Result;
1388 
1389  // If all PHI operands are the same operation, pull them through the PHI,
1390  // reducing code size.
1391  if (isa<Instruction>(PN.getIncomingValue(0)) &&
1392  isa<Instruction>(PN.getIncomingValue(1)) &&
1393  cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
1394  cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
1395  PN.getIncomingValue(0)->hasOneUser())
1396  if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
1397  return Result;
1398 
1399  // If the incoming values are pointer casts of the same original value,
1400  // replace the phi with a single cast iff we can insert a non-PHI instruction.
1401  if (PN.getType()->isPointerTy() &&
1402  PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
1403  Value *IV0 = PN.getIncomingValue(0);
1404  Value *IV0Stripped = IV0->stripPointerCasts();
1405  // Set to keep track of values known to be equal to IV0Stripped after
1406  // stripping pointer casts.
1407  SmallPtrSet<Value *, 4> CheckedIVs;
1408  CheckedIVs.insert(IV0);
1409  if (IV0 != IV0Stripped &&
1410  all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
1411  return !CheckedIVs.insert(IV).second ||
1412  IV0Stripped == IV->stripPointerCasts();
1413  })) {
1414  return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
1415  }
1416  }
1417 
1418  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
1419  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
1420  // PHI)... break the cycle.
1421  if (PN.hasOneUse()) {
1422  if (foldIntegerTypedPHI(PN))
1423  return nullptr;
1424 
1425  Instruction *PHIUser = cast<Instruction>(PN.user_back());
1426  if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
1427  SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
1428  PotentiallyDeadPHIs.insert(&PN);
1429  if (isDeadPHICycle(PU, PotentiallyDeadPHIs))
1430  return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
1431  }
1432 
1433  // If this phi has a single use, and if that use just computes a value for
1434  // the next iteration of a loop, delete the phi. This occurs with unused
1435  // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
1436  // common case here is good because the only other things that catch this
1437  // are induction variable analysis (sometimes) and ADCE, which is only run
1438  // late.
1439  if (PHIUser->hasOneUse() &&
1440  (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
1441  PHIUser->user_back() == &PN) {
1442  return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
1443  }
1444  // When a PHI is used only to be compared with zero, it is safe to replace
1445  // an incoming value proved as known nonzero with any non-zero constant.
1446  // For example, in the code below, the incoming value %v can be replaced
1447  // with any non-zero constant based on the fact that the PHI is only used to
1448  // be compared with zero and %v is a known non-zero value:
1449  // %v = select %cond, 1, 2
1450  // %p = phi [%v, BB] ...
1451  // icmp eq, %p, 0
1452  auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
1453  // FIXME: To keep it simple, handle only integer types for now.
1454  if (CmpInst && isa<IntegerType>(PN.getType()) && CmpInst->isEquality() &&
1455  match(CmpInst->getOperand(1), m_Zero())) {
1456  ConstantInt *NonZeroConst = nullptr;
1457  bool MadeChange = false;
1458  for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1459  Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
1460  Value *VA = PN.getIncomingValue(I);
1461  if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
1462  if (!NonZeroConst)
1463  NonZeroConst = getAnyNonZeroConstInt(PN);
1464 
1465  if (NonZeroConst != VA) {
1466  replaceOperand(PN, I, NonZeroConst);
1467  MadeChange = true;
1468  }
1469  }
1470  }
1471  if (MadeChange)
1472  return &PN;
1473  }
1474  }
1475 
1476  // We sometimes end up with phi cycles that non-obviously end up being the
1477  // same value, for example:
1478  // z = some value; x = phi (y, z); y = phi (x, z)
1479  // where the phi nodes don't necessarily need to be in the same block. Do a
1480  // quick check to see if the PHI node only contains a single non-phi value, if
1481  // so, scan to see if the phi cycle is actually equal to that value.
1482  {
1483  unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
1484  // Scan for the first non-phi operand.
1485  while (InValNo != NumIncomingVals &&
1486  isa<PHINode>(PN.getIncomingValue(InValNo)))
1487  ++InValNo;
1488 
1489  if (InValNo != NumIncomingVals) {
1490  Value *NonPhiInVal = PN.getIncomingValue(InValNo);
1491 
1492  // Scan the rest of the operands to see if there are any conflicts, if so
1493  // there is no need to recursively scan other phis.
1494  for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
1495  Value *OpVal = PN.getIncomingValue(InValNo);
1496  if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
1497  break;
1498  }
1499 
1500  // If we scanned over all operands, then we have one unique value plus
1501  // phi values. Scan PHI nodes to see if they all merge in each other or
1502  // the value.
1503  if (InValNo == NumIncomingVals) {
1504  SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
1505  if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
1506  return replaceInstUsesWith(PN, NonPhiInVal);
1507  }
1508  }
1509  }
1510 
1511  // If there are multiple PHIs, sort their operands so that they all list
1512  // the blocks in the same order. This will help identical PHIs be eliminated
1513  // by other passes. Other passes shouldn't depend on this for correctness
1514  // however.
1515  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
1516  if (&PN != FirstPN)
1517  for (unsigned I = 0, E = FirstPN->getNumIncomingValues(); I != E; ++I) {
1518  BasicBlock *BBA = PN.getIncomingBlock(I);
1519  BasicBlock *BBB = FirstPN->getIncomingBlock(I);
1520  if (BBA != BBB) {
1521  Value *VA = PN.getIncomingValue(I);
1522  unsigned J = PN.getBasicBlockIndex(BBB);
1523  Value *VB = PN.getIncomingValue(J);
1524  PN.setIncomingBlock(I, BBB);
1525  PN.setIncomingValue(I, VB);
1526  PN.setIncomingBlock(J, BBA);
1527  PN.setIncomingValue(J, VA);
1528  // NOTE: Instcombine normally would want us to "return &PN" if we
1529  // modified any of the operands of an instruction. However, since we
1530  // aren't adding or removing uses (just rearranging them) we don't do
1531  // this in this case.
1532  }
1533  }
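  // A sketch of the reordering above (illustrative operands only):
  //   %a = phi i32 [ 0, %bb1 ], [ 1, %bb2 ]
  //   %b = phi i32 [ 2, %bb2 ], [ 3, %bb1 ]
  // %b's operand pairs are swapped to [ 3, %bb1 ], [ 2, %bb2 ] so that both
  // phis list %bb1 first, which makes structurally identical phis easier
  // for later passes to recognize.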
1534 
1535  // Is there an identical PHI node in this basic block?
1536  for (PHINode &IdenticalPN : PN.getParent()->phis()) {
1537  // Ignore the PHI node itself.
1538  if (&IdenticalPN == &PN)
1539  continue;
1540  // Note that even though we've just canonicalized this PHI, due to the
1541  // worklist visitation order, there are no guarantees that *every* PHI
1542  // has been canonicalized, so we can't just compare operands ranges.
1543  if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
1544  continue;
1545  // Just use that PHI instead then.
1546  ++NumPHICSEs;
1547  return replaceInstUsesWith(PN, &IdenticalPN);
1548  }
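  // A minimal example of this CSE (illustrative names):
  //   %p1 = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  //   %p2 = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  // When %p2 is visited, it is identical to %p1 when defined, so all uses
  // of %p2 are rewritten to use %p1 instead.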
1549 
1550  // If this is an integer PHI and we know that it has an illegal type, see if
1551  // it is only used by trunc or trunc(lshr) operations. If so, we split the
1552  // PHI into the various pieces being extracted. This sort of thing is
1553  // introduced when SROA promotes an aggregate to a single large integer type.
1554  if (PN.getType()->isIntegerTy() &&
1555  !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1556  if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1557  return Res;
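  // A sketch of what SliceUpIllegalIntegerPHI targets, assuming i64 is not
  // a legal integer type for the target (names are illustrative):
  //   %p = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
  //   %lo = trunc i64 %p to i32
  //   %hs = lshr i64 %p, 32
  //   %hi = trunc i64 %hs to i32
  // The illegal i64 phi is split into separate i32 phis that feed %lo and
  // %hi directly.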
1558 
1559  // Ultimately, try to replace this Phi with a dominating condition.
1560  if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
1561  return replaceInstUsesWith(PN, V);
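  // As an illustration of this control-flow based simplification (names are
  // invented): given
  //   bb:    br i1 %cond, label %t, label %f
  //   t:     br label %merge
  //   f:     br label %merge
  //   merge: %p = phi i1 [ true, %t ], [ false, %f ]
  // the incoming constants are fully determined by the dominating branch,
  // so %p can be replaced with %cond itself.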
1562 
1563  return nullptr;
1564 }