LLVM 23.0.0git
InstCombinePHI.cpp
Go to the documentation of this file.
1//===- InstCombinePHI.cpp -------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitPHINode function.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/Statistic.h"
23#include <optional>
24
25using namespace llvm;
26using namespace llvm::PatternMatch;
27
28#define DEBUG_TYPE "instcombine"
29
31MaxNumPhis("instcombine-max-num-phis", cl::init(512),
32 cl::desc("Maximum number phis to handle in intptr/ptrint folding"));
33
34STATISTIC(NumPHIsOfInsertValues,
35 "Number of phi-of-insertvalue turned into insertvalue-of-phis");
36STATISTIC(NumPHIsOfExtractValues,
37 "Number of phi-of-extractvalue turned into extractvalue-of-phi");
38STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
39
40/// The PHI arguments will be folded into a single operation with a PHI node
41/// as input. The debug location of the single operation will be the merged
42/// locations of the original PHI node arguments.
44 auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
45 Inst->setDebugLoc(FirstInst->getDebugLoc());
46 // We do not expect a CallInst here, otherwise, N-way merging of DebugLoc
47 // will be inefficient.
48 assert(!isa<CallInst>(Inst));
49
50 for (Value *V : drop_begin(PN.incoming_values())) {
51 auto *I = cast<Instruction>(V);
52 Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
53 }
54}
55
56/// If the phi is within a phi web, which is formed by the def-use chain
57/// of phis and all the phis in the web are only used in the other phis.
58/// In this case, these phis are dead and we will remove all of them.
62 Stack.push_back(&PN);
63 Visited.insert(&PN);
64 while (!Stack.empty()) {
65 PHINode *Phi = Stack.pop_back_val();
66 for (User *Use : Phi->users()) {
67 if (PHINode *PhiUse = dyn_cast<PHINode>(Use)) {
68 if (!Visited.insert(PhiUse).second)
69 continue;
70 // Early stop if the set of PHIs is large
71 if (Visited.size() >= 16)
72 return false;
73 Stack.push_back(PhiUse);
74 } else
75 return false;
76 }
77 }
78 for (PHINode *Phi : Visited)
79 replaceInstUsesWith(*Phi, PoisonValue::get(Phi->getType()));
80 for (PHINode *Phi : Visited)
82 return true;
83}
84
85// Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
86// If there is an existing pointer typed PHI that produces the same value as PN,
87// replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
88// PHI node:
89//
90// Case-1:
91// bb1:
92// int_init = PtrToInt(ptr_init)
93// br label %bb2
94// bb2:
95// int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
96// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
97// ptr_val2 = IntToPtr(int_val)
98// ...
99// use(ptr_val2)
100// ptr_val_inc = ...
101// inc_val_inc = PtrToInt(ptr_val_inc)
102//
103// ==>
104// bb1:
105// br label %bb2
106// bb2:
107// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
108// ...
109// use(ptr_val)
110// ptr_val_inc = ...
111//
112// Case-2:
113// bb1:
114// int_ptr = BitCast(ptr_ptr)
115// int_init = Load(int_ptr)
116// br label %bb2
117// bb2:
118// int_val = PHI([int_init, %bb1], [int_val_inc, %bb2]
119// ptr_val2 = IntToPtr(int_val)
120// ...
121// use(ptr_val2)
122// ptr_val_inc = ...
123// inc_val_inc = PtrToInt(ptr_val_inc)
124// ==>
125// bb1:
126// ptr_init = Load(ptr_ptr)
127// br label %bb2
128// bb2:
129// ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2]
130// ...
131// use(ptr_val)
132// ptr_val_inc = ...
133// ...
134//
136 if (!PN.getType()->isIntegerTy())
137 return false;
138 if (!PN.hasOneUse())
139 return false;
140
141 auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
142 if (!IntToPtr)
143 return false;
144
145 // Check if the pointer is actually used as pointer:
146 auto HasPointerUse = [](Instruction *IIP) {
147 for (User *U : IIP->users()) {
148 Value *Ptr = nullptr;
149 if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
150 Ptr = LoadI->getPointerOperand();
151 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
152 Ptr = SI->getPointerOperand();
154 Ptr = GI->getPointerOperand();
155 }
156
157 if (Ptr && Ptr == IIP)
158 return true;
159 }
160 return false;
161 };
162
163 if (!HasPointerUse(IntToPtr))
164 return false;
165
166 if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
167 DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
168 return false;
169
170 SmallVector<Value *, 4> AvailablePtrVals;
171 for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
172 BasicBlock *BB = std::get<0>(Incoming);
173 Value *Arg = std::get<1>(Incoming);
174
175 // Arg could be a constant, constant expr, etc., which we don't cover here.
176 if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
177 return false;
178
179 // First look backward:
180 if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
181 if (PI->getOperand(0)->getType() == IntToPtr->getType()) {
182 AvailablePtrVals.emplace_back(PI->getOperand(0));
183 continue;
184 }
185 }
186
187 // Next look forward:
188 Value *ArgIntToPtr = nullptr;
189 for (User *U : Arg->users()) {
190 if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
191 (DT.dominates(cast<Instruction>(U), BB) ||
192 cast<Instruction>(U)->getParent() == BB)) {
193 ArgIntToPtr = U;
194 break;
195 }
196 }
197
198 if (ArgIntToPtr) {
199 AvailablePtrVals.emplace_back(ArgIntToPtr);
200 continue;
201 }
202
203 // If Arg is defined by a PHI, allow it. This will also create
204 // more opportunities iteratively.
205 if (isa<PHINode>(Arg)) {
206 AvailablePtrVals.emplace_back(Arg);
207 continue;
208 }
209
210 // For a single use integer load:
211 auto *LoadI = dyn_cast<LoadInst>(Arg);
212 if (!LoadI)
213 return false;
214
215 if (!LoadI->hasOneUse())
216 return false;
217
218 // Push the integer typed Load instruction into the available
219 // value set, and fix it up later when the pointer typed PHI
220 // is synthesized.
221 AvailablePtrVals.emplace_back(LoadI);
222 }
223
224 // Now search for a matching PHI
225 auto *BB = PN.getParent();
226 assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
227 "Not enough available ptr typed incoming values");
228 PHINode *MatchingPtrPHI = nullptr;
229 unsigned NumPhis = 0;
230 for (PHINode &PtrPHI : BB->phis()) {
231 // FIXME: consider handling this in AggressiveInstCombine
232 if (NumPhis++ > MaxNumPhis)
233 return false;
234 if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
235 continue;
236 if (any_of(zip(PN.blocks(), AvailablePtrVals),
237 [&](const auto &BlockAndValue) {
238 BasicBlock *BB = std::get<0>(BlockAndValue);
239 Value *V = std::get<1>(BlockAndValue);
240 return PtrPHI.getIncomingValueForBlock(BB) != V;
241 }))
242 continue;
243 MatchingPtrPHI = &PtrPHI;
244 break;
245 }
246
247 if (MatchingPtrPHI) {
248 assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
249 "Phi's Type does not match with IntToPtr");
250 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
251 // to make sure another transform can't undo it in the meantime.
252 replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
253 eraseInstFromFunction(*IntToPtr);
255 return true;
256 }
257
258 // If it requires a conversion for every PHI operand, do not do it.
259 if (all_of(AvailablePtrVals, [&](Value *V) {
260 return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
261 }))
262 return false;
263
264 // If any of the operand that requires casting is a terminator
265 // instruction, do not do it. Similarly, do not do the transform if the value
266 // is PHI in a block with no insertion point, for example, a catchswitch
267 // block, since we will not be able to insert a cast after the PHI.
268 if (any_of(AvailablePtrVals, [&](Value *V) {
269 if (V->getType() == IntToPtr->getType())
270 return false;
271 auto *Inst = dyn_cast<Instruction>(V);
272 if (!Inst)
273 return false;
274 if (Inst->isTerminator())
275 return true;
276 auto *BB = Inst->getParent();
277 if (isa<PHINode>(Inst) && !BB->hasInsertionPt())
278 return true;
279 return false;
280 }))
281 return false;
282
283 PHINode *NewPtrPHI = PHINode::Create(
284 IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");
285
286 InsertNewInstBefore(NewPtrPHI, PN.getIterator());
288 for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
289 auto *IncomingBB = std::get<0>(Incoming);
290 auto *IncomingVal = std::get<1>(Incoming);
291
292 if (IncomingVal->getType() == IntToPtr->getType()) {
293 NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
294 continue;
295 }
296
297#ifndef NDEBUG
298 LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
299 assert((isa<PHINode>(IncomingVal) ||
300 IncomingVal->getType()->isPointerTy() ||
301 (LoadI && LoadI->hasOneUse())) &&
302 "Can not replace LoadInst with multiple uses");
303#endif
304 // Need to insert a BitCast.
305 // For an integer Load instruction with a single use, the load + IntToPtr
306 // cast will be simplified into a pointer load:
307 // %v = load i64, i64* %a.ip, align 8
308 // %v.cast = inttoptr i64 %v to float **
309 // ==>
310 // %v.ptrp = bitcast i64 * %a.ip to float **
311 // %v.cast = load float *, float ** %v.ptrp, align 8
312 Instruction *&CI = Casts[IncomingVal];
313 if (!CI) {
314 CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
315 IncomingVal->getName() + ".ptr");
316 if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
317 BasicBlock::iterator InsertPos(IncomingI);
318 InsertPos++;
319 BasicBlock *BB = IncomingI->getParent();
320 if (isa<PHINode>(IncomingI))
321 InsertPos = BB->getFirstInsertionPt();
322 assert(InsertPos != BB->end() && "should have checked above");
323 InsertNewInstBefore(CI, InsertPos);
324 } else {
325 auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
326 InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
327 }
328 }
329 NewPtrPHI->addIncoming(CI, IncomingBB);
330 }
331
332 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
333 // to make sure another transform can't undo it in the meantime.
334 replaceInstUsesWith(*IntToPtr, NewPtrPHI);
335 eraseInstFromFunction(*IntToPtr);
337 return true;
338}
339
340// Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
341// fold Phi-operand to bitcast.
343 // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
344 // Make sure all uses of phi are ptr2int.
346 return nullptr;
347
348 // Iterating over all operands to check presence of target pointers for
349 // optimization.
350 bool OperandWithRoundTripCast = false;
351 for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
352 if (auto *NewOp =
353 simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
354 replaceOperand(PN, OpNum, NewOp);
355 OperandWithRoundTripCast = true;
356 }
357 }
358 if (!OperandWithRoundTripCast)
359 return nullptr;
360 return &PN;
361}
362
363/// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
364/// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
367 auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));
368
369 // Scan to see if all operands are `insertvalue`'s with the same indices,
370 // and all have a single use.
371 for (Value *V : drop_begin(PN.incoming_values())) {
372 auto *I = dyn_cast<InsertValueInst>(V);
373 if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
374 return nullptr;
375 }
376
377 // For each operand of an `insertvalue`
378 std::array<PHINode *, 2> NewOperands;
379 for (int OpIdx : {0, 1}) {
380 auto *&NewOperand = NewOperands[OpIdx];
381 // Create a new PHI node to receive the values the operand has in each
382 // incoming basic block.
383 NewOperand = PHINode::Create(
384 FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
385 FirstIVI->getOperand(OpIdx)->getName() + ".pn");
386 // And populate each operand's PHI with said values.
387 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
388 NewOperand->addIncoming(
389 cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
390 std::get<0>(Incoming));
391 InsertNewInstBefore(NewOperand, PN.getIterator());
392 }
393
394 // And finally, create `insertvalue` over the newly-formed PHI nodes.
395 auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
396 FirstIVI->getIndices(), PN.getName());
397
398 PHIArgMergedDebugLoc(NewIVI, PN);
399 ++NumPHIsOfInsertValues;
400 return NewIVI;
401}
402
403/// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
404/// turn this into a phi[a,b] and a single extractvalue.
407 auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));
408
409 // Scan to see if all operands are `extractvalue`'s with the same indices,
410 // and all have a single use.
411 for (Value *V : drop_begin(PN.incoming_values())) {
412 auto *I = dyn_cast<ExtractValueInst>(V);
413 if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
414 I->getAggregateOperand()->getType() !=
415 FirstEVI->getAggregateOperand()->getType())
416 return nullptr;
417 }
418
419 // Create a new PHI node to receive the values the aggregate operand has
420 // in each incoming basic block.
421 auto *NewAggregateOperand = PHINode::Create(
422 FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
423 FirstEVI->getAggregateOperand()->getName() + ".pn");
424 // And populate the PHI with said values.
425 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
426 NewAggregateOperand->addIncoming(
427 cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
428 std::get<0>(Incoming));
429 InsertNewInstBefore(NewAggregateOperand, PN.getIterator());
430
431 // And finally, create `extractvalue` over the newly-formed PHI nodes.
432 auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
433 FirstEVI->getIndices(), PN.getName());
434
435 PHIArgMergedDebugLoc(NewEVI, PN);
436 ++NumPHIsOfExtractValues;
437 return NewEVI;
438}
439
440/// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
441/// adds all have a single user, turn this into a phi and a single binop.
444 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
445 unsigned Opc = FirstInst->getOpcode();
446 Value *LHSVal = FirstInst->getOperand(0);
447 Value *RHSVal = FirstInst->getOperand(1);
448
449 Type *LHSType = LHSVal->getType();
450 Type *RHSType = RHSVal->getType();
451
452 // Scan to see if all operands are the same opcode, and all have one user.
453 for (Value *V : drop_begin(PN.incoming_values())) {
455 if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
456 // Verify type of the LHS matches so we don't fold cmp's of different
457 // types.
458 I->getOperand(0)->getType() != LHSType ||
459 I->getOperand(1)->getType() != RHSType)
460 return nullptr;
461
462 // If they are CmpInst instructions, check their predicates
463 if (CmpInst *CI = dyn_cast<CmpInst>(I))
464 if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
465 return nullptr;
466
467 // Keep track of which operand needs a phi node.
468 if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
469 if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
470 }
471
472 // If both LHS and RHS would need a PHI, don't do this transformation,
473 // because it would increase the number of PHIs entering the block,
474 // which leads to higher register pressure. This is especially
475 // bad when the PHIs are in the header of a loop.
476 if (!LHSVal && !RHSVal)
477 return nullptr;
478
479 // Otherwise, this is safe to transform!
480
481 Value *InLHS = FirstInst->getOperand(0);
482 Value *InRHS = FirstInst->getOperand(1);
483 PHINode *NewLHS = nullptr, *NewRHS = nullptr;
484 if (!LHSVal) {
485 NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
486 FirstInst->getOperand(0)->getName() + ".pn");
487 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
488 InsertNewInstBefore(NewLHS, PN.getIterator());
489 LHSVal = NewLHS;
490 }
491
492 if (!RHSVal) {
493 NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
494 FirstInst->getOperand(1)->getName() + ".pn");
495 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
496 InsertNewInstBefore(NewRHS, PN.getIterator());
497 RHSVal = NewRHS;
498 }
499
500 // Add all operands to the new PHIs.
501 if (NewLHS || NewRHS) {
502 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
503 BasicBlock *InBB = std::get<0>(Incoming);
504 Value *InVal = std::get<1>(Incoming);
505 Instruction *InInst = cast<Instruction>(InVal);
506 if (NewLHS) {
507 Value *NewInLHS = InInst->getOperand(0);
508 NewLHS->addIncoming(NewInLHS, InBB);
509 }
510 if (NewRHS) {
511 Value *NewInRHS = InInst->getOperand(1);
512 NewRHS->addIncoming(NewInRHS, InBB);
513 }
514 }
515 }
516
517 if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
518 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
519 LHSVal, RHSVal);
520 PHIArgMergedDebugLoc(NewCI, PN);
521 return NewCI;
522 }
523
524 BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
525 BinaryOperator *NewBinOp =
526 BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
527
528 NewBinOp->copyIRFlags(PN.getIncomingValue(0));
529
530 for (Value *V : drop_begin(PN.incoming_values()))
531 NewBinOp->andIRFlags(V);
532
533 PHIArgMergedDebugLoc(NewBinOp, PN);
534 return NewBinOp;
535}
536
539
540 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
541 FirstInst->op_end());
542 // This is true if all GEP bases are allocas and if all indices into them are
543 // constants.
544 bool AllBasePointersAreAllocas = true;
545
546 // We don't want to replace this phi if the replacement would require
547 // more than one phi, which leads to higher register pressure. This is
548 // especially bad when the PHIs are in the header of a loop.
549 bool NeededPhi = false;
550
551 // Remember flags of the first phi-operand getelementptr.
552 GEPNoWrapFlags NW = FirstInst->getNoWrapFlags();
553
554 // Scan to see if all operands are the same opcode, and all have one user.
555 for (Value *V : drop_begin(PN.incoming_values())) {
557 if (!GEP || !GEP->hasOneUser() ||
558 GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
559 GEP->getNumOperands() != FirstInst->getNumOperands())
560 return nullptr;
561
562 NW &= GEP->getNoWrapFlags();
563
564 // Keep track of whether or not all GEPs are of alloca pointers.
565 if (AllBasePointersAreAllocas &&
566 (!isa<AllocaInst>(GEP->getOperand(0)) ||
567 !GEP->hasAllConstantIndices()))
568 AllBasePointersAreAllocas = false;
569
570 // Compare the operand lists.
571 for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
572 if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
573 continue;
574
575 // Don't merge two GEPs when two operands differ (introducing phi nodes)
576 // if one of the PHIs has a constant for the index. The index may be
577 // substantially cheaper to compute for the constants, so making it a
578 // variable index could pessimize the path. This also handles the case
579 // for struct indices, which must always be constant.
580 if (isa<Constant>(FirstInst->getOperand(Op)) ||
581 isa<Constant>(GEP->getOperand(Op)))
582 return nullptr;
583
584 if (FirstInst->getOperand(Op)->getType() !=
585 GEP->getOperand(Op)->getType())
586 return nullptr;
587
588 // If we already needed a PHI for an earlier operand, and another operand
589 // also requires a PHI, we'd be introducing more PHIs than we're
590 // eliminating, which increases register pressure on entry to the PHI's
591 // block.
592 if (NeededPhi)
593 return nullptr;
594
595 FixedOperands[Op] = nullptr; // Needs a PHI.
596 NeededPhi = true;
597 }
598 }
599
600 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
601 // bother doing this transformation. At best, this will just save a bit of
602 // offset calculation, but all the predecessors will have to materialize the
603 // stack address into a register anyway. We'd actually rather *clone* the
604 // load up into the predecessors so that we have a load of a gep of an alloca,
605 // which can usually all be folded into the load.
606 if (AllBasePointersAreAllocas)
607 return nullptr;
608
609 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
610 // that is variable.
611 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
612
613 bool HasAnyPHIs = false;
614 for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
615 if (FixedOperands[I])
616 continue; // operand doesn't need a phi.
617 Value *FirstOp = FirstInst->getOperand(I);
618 PHINode *NewPN =
619 PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
620 InsertNewInstBefore(NewPN, PN.getIterator());
621
622 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
623 OperandPhis[I] = NewPN;
624 FixedOperands[I] = NewPN;
625 HasAnyPHIs = true;
626 }
627
628 // Add all operands to the new PHIs.
629 if (HasAnyPHIs) {
630 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
631 BasicBlock *InBB = std::get<0>(Incoming);
632 Value *InVal = std::get<1>(Incoming);
634
635 for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
636 if (PHINode *OpPhi = OperandPhis[Op])
637 OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
638 }
639 }
640
641 Value *Base = FixedOperands[0];
642 GetElementPtrInst *NewGEP =
644 ArrayRef(FixedOperands).slice(1), NW);
645 PHIArgMergedDebugLoc(NewGEP, PN);
646 return NewGEP;
647}
648
649/// Return true if we know that it is safe to sink the load out of the block
650/// that defines it. This means that it must be obvious the value of the load is
651/// not changed from the point of the load to the end of the block it is in.
652///
653/// Finally, it is safe, but not profitable, to sink a load targeting a
654/// non-address-taken alloca. Doing so will cause us to not promote the alloca
655/// to a register.
657 BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
658
659 for (++BBI; BBI != E; ++BBI)
660 if (BBI->mayWriteToMemory()) {
661 // Calls that only access inaccessible memory do not block sinking the
662 // load.
663 if (auto *CB = dyn_cast<CallBase>(BBI))
664 if (CB->onlyAccessesInaccessibleMemory())
665 continue;
666 return false;
667 }
668
669 // Check for non-address taken alloca. If not address-taken already, it isn't
670 // profitable to do this xform.
671 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
672 bool IsAddressTaken = false;
673 for (User *U : AI->users()) {
674 if (isa<LoadInst>(U)) continue;
675 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
676 // If storing TO the alloca, then the address isn't taken.
677 if (SI->getOperand(1) == AI) continue;
678 }
679 IsAddressTaken = true;
680 break;
681 }
682
683 if (!IsAddressTaken && AI->isStaticAlloca())
684 return false;
685 }
686
687 // If this load is a load from a GEP with a constant offset from an alloca,
688 // then we don't want to sink it. In its present form, it will be
689 // load [constant stack offset]. Sinking it will cause us to have to
690 // materialize the stack addresses in each predecessor in a register only to
691 // do a shared load from register in the successor.
692 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
693 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
694 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
695 return false;
696
697 return true;
698}
699
701 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
702
703 if (!canReplaceOperandWithVariable(FirstLI, 0))
704 return nullptr;
705
706 // FIXME: This is overconservative; this transform is allowed in some cases
707 // for atomic operations.
708 if (!FirstLI->isSimple())
709 return nullptr;
710
711 // When processing loads, we need to propagate the alignment and address
712 // space of the load.
713 Align LoadAlignment = FirstLI->getAlign();
714 const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
715
716 // We can't sink the load if the loaded value could be modified between the
717 // load and the PHI.
718 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
720 return nullptr;
721
722 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
723 BasicBlock *InBB = std::get<0>(Incoming);
724 Value *InVal = std::get<1>(Incoming);
725 LoadInst *LI = dyn_cast<LoadInst>(InVal);
726 if (!LI || !LI->hasOneUser() || !LI->isSimple())
727 return nullptr;
728
729 // Make sure all arguments are the same type of operation.
730 if (LI->getPointerAddressSpace() != LoadAddrSpace)
731 return nullptr;
732
734 return nullptr;
735
736 // We can't sink the load if the loaded value could be modified between
737 // the load and the PHI.
738 if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
739 return nullptr;
740
741 LoadAlignment = std::min(LoadAlignment, LI->getAlign());
742 }
743
744 // Okay, they are all the same operation. Create a new PHI node of the
745 // correct type, and PHI together all of the LHS's of the instructions.
746 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
748 PN.getName()+".in");
749
750 Value *InVal = FirstLI->getOperand(0);
751 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
752 LoadInst *NewLI = new LoadInst(FirstLI->getType(), NewPN, "",
753 /*IsVolatile=*/false, LoadAlignment);
754 NewLI->copyMetadata(*FirstLI);
755
756 // Add all operands to the new PHI and combine TBAA metadata.
757 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
758 BasicBlock *BB = std::get<0>(Incoming);
759 Value *V = std::get<1>(Incoming);
760 LoadInst *LI = cast<LoadInst>(V);
761 combineMetadataForCSE(NewLI, LI, true);
762 Value *NewInVal = LI->getOperand(0);
763 if (NewInVal != InVal)
764 InVal = nullptr;
765 NewPN->addIncoming(NewInVal, BB);
766 }
767
768 if (InVal) {
769 // The new PHI unions all of the same values together. This is really
770 // common, so we handle it intelligently here for compile-time speed.
771 NewLI->setOperand(0, InVal);
772 delete NewPN;
773 } else {
774 InsertNewInstBefore(NewPN, PN.getIterator());
775 }
776
777 PHIArgMergedDebugLoc(NewLI, PN);
778 return NewLI;
779}
780
781/// TODO: This function could handle other cast types, but then it might
782/// require special-casing a cast from the 'i1' type. See the comment in
783/// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
785 // We cannot create a new instruction after the PHI if the terminator is an
786 // EHPad because there is no valid insertion point.
787 if (Instruction *TI = Phi.getParent()->getTerminator())
788 if (TI->isEHPad())
789 return nullptr;
790
791 // Early exit for the common case of a phi with two operands. These are
792 // handled elsewhere. See the comment below where we check the count of zexts
793 // and constants for more details.
794 unsigned NumIncomingValues = Phi.getNumIncomingValues();
795 if (NumIncomingValues < 3)
796 return nullptr;
797
798 // Find the narrower type specified by the first zext.
799 Type *NarrowType = nullptr;
800 for (Value *V : Phi.incoming_values()) {
801 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
802 NarrowType = Zext->getSrcTy();
803 break;
804 }
805 }
806 if (!NarrowType)
807 return nullptr;
808
809 // Walk the phi operands checking that we only have zexts or constants that
810 // we can shrink for free. Store the new operands for the new phi.
811 SmallVector<Value *, 4> NewIncoming;
812 unsigned NumZexts = 0;
813 unsigned NumConsts = 0;
814 for (Value *V : Phi.incoming_values()) {
815 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
816 // All zexts must be identical and have one user.
817 if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
818 return nullptr;
819 NewIncoming.push_back(Zext->getOperand(0));
820 NumZexts++;
821 } else if (auto *C = dyn_cast<Constant>(V)) {
822 // Make sure that constants can fit in the new type.
823 Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType, DL);
824 if (!Trunc)
825 return nullptr;
826 NewIncoming.push_back(Trunc);
827 NumConsts++;
828 } else {
829 // If it's not a cast or a constant, bail out.
830 return nullptr;
831 }
832 }
833
834 // The more common cases of a phi with no constant operands or just one
835 // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
836 // respectively. foldOpIntoPhi() wants to do the opposite transform that is
837 // performed here. It tries to replicate a cast in the phi operand's basic
838 // block to expose other folding opportunities. Thus, InstCombine will
839 // infinite loop without this check.
840 if (NumConsts == 0 || NumZexts < 2)
841 return nullptr;
842
843 // All incoming values are zexts or constants that are safe to truncate.
844 // Create a new phi node of the narrow type, phi together all of the new
845 // operands, and zext the result back to the original type.
846 PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
847 Phi.getName() + ".shrunk");
848 for (unsigned I = 0; I != NumIncomingValues; ++I)
849 NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));
850
851 InsertNewInstBefore(NewPhi, Phi.getIterator());
852 auto *CI = CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
853
854 // We use a dropped location here because the new ZExt is necessarily a merge
855 // of ZExtInsts and at least one constant from incoming branches; the presence
856 // of the constant means we have no viable DebugLoc from that branch, and
857 // therefore we must use a dropped location.
858 CI->setDebugLoc(DebugLoc::getDropped());
859 return CI;
860}
861
862/// If all operands to a PHI node are the same "unary" operator and they all are
863/// only used by the PHI, PHI together their inputs, and do the operation once,
864/// to the result of the PHI.
// NOTE(review): the function signature line (doxygen 865,
// InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN)) is missing from this
// extraction -- verify against upstream.
866 // We cannot create a new instruction after the PHI if the terminator is an
867 // EHPad because there is no valid insertion point.
868 if (Instruction *TI = PN.getParent()->getTerminator())
869 if (TI->isEHPad())
870 return nullptr;
871
// NOTE(review): doxygen line 872 (the definition of FirstInst, presumably
// taken from incoming value 0) is missing from this extraction.
873
// Dispatch to the specialized folders; each has its own profitability rules.
874 if (isa<GetElementPtrInst>(FirstInst))
875 return foldPHIArgGEPIntoPHI(PN);
876 if (isa<LoadInst>(FirstInst))
877 return foldPHIArgLoadIntoPHI(PN);
878 if (isa<InsertValueInst>(FirstInst))
// NOTE(review): the "return fold..." body lines (doxygen 879 and 881) are
// missing from this extraction.
880 if (isa<ExtractValueInst>(FirstInst))
882
883 // Scan the instruction, looking for input operations that can be folded away.
884 // If all input operands to the phi are the same instruction (e.g. a cast from
885 // the same type or "+42") we can pull the operation through the PHI, reducing
886 // code size and simplifying code.
887 Constant *ConstantOp = nullptr;
888 Type *CastSrcTy = nullptr;
889
890 if (isa<CastInst>(FirstInst)) {
891 CastSrcTy = FirstInst->getOperand(0)->getType();
892
893 // Be careful about transforming integer PHIs. We don't want to pessimize
894 // the code by turning an i32 into an i1293.
895 if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
896 if (!shouldChangeType(PN.getType(), CastSrcTy))
897 return nullptr;
898 }
899 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
900 // Can fold binop, compare or shift here if the RHS is a constant,
901 // otherwise call FoldPHIArgBinOpIntoPHI.
902 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
903 if (!ConstantOp)
904 return foldPHIArgBinOpIntoPHI(PN);
905 } else {
906 return nullptr; // Cannot fold this operation.
907 }
908
909 // Check to see if all arguments are the same operation.
910 for (Value *V : drop_begin(PN.incoming_values())) {
// NOTE(review): doxygen line 911 (the dyn_cast of V to Instruction *I) is
// missing from this extraction.
912 if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
913 return nullptr;
914 if (CastSrcTy) {
915 if (I->getOperand(0)->getType() != CastSrcTy)
916 return nullptr; // Cast operation must match.
917 } else if (I->getOperand(1) != ConstantOp) {
918 return nullptr;
919 }
920 }
921
922 // Okay, they are all the same operation. Create a new PHI node of the
923 // correct type, and PHI together all of the LHS's of the instructions.
924 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
// NOTE(review): doxygen line 925 (the reserved incoming-value count argument)
// is missing from this extraction.
926 PN.getName()+".in");
927
928 Value *InVal = FirstInst->getOperand(0);
929 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
930
931 // Add all operands to the new PHI.
// InVal tracks whether every incoming instruction reads the same value; it is
// cleared to null as soon as two distinct inputs are observed.
932 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
933 BasicBlock *BB = std::get<0>(Incoming);
934 Value *V = std::get<1>(Incoming);
935 Value *NewInVal = cast<Instruction>(V)->getOperand(0);
936 if (NewInVal != InVal)
937 InVal = nullptr;
938 NewPN->addIncoming(NewInVal, BB);
939 }
940
941 Value *PhiVal;
942 if (InVal) {
943 // The new PHI unions all of the same values together. This is really
944 // common, so we handle it intelligently here for compile-time speed.
945 PhiVal = InVal;
// NewPN was never inserted into a basic block, so plain delete is safe here.
946 delete NewPN;
947 } else {
948 InsertNewInstBefore(NewPN, PN.getIterator());
949 PhiVal = NewPN;
950 }
951
952 // Insert and return the new operation.
953 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
954 CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
955 PN.getType());
956 PHIArgMergedDebugLoc(NewCI, PN);
957 return NewCI;
958 }
959
960 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
961 BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
// Keep only the IR flags (nsw/nuw/fast-math, etc.) common to every incoming
// instruction: copy from the first, then AND in the rest.
962 BinOp->copyIRFlags(PN.getIncomingValue(0));
963
964 for (Value *V : drop_begin(PN.incoming_values()))
965 BinOp->andIRFlags(V);
966
967 PHIArgMergedDebugLoc(BinOp, PN);
968 return BinOp;
969 }
970
971 CmpInst *CIOp = cast<CmpInst>(FirstInst);
972 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
973 PhiVal, ConstantOp);
974 PHIArgMergedDebugLoc(NewCI, PN);
975 return NewCI;
976}
977
978/// Return true if this phi node is always equal to NonPhiInVal.
979/// This happens with mutually cyclic phi nodes like:
980/// z = some value; x = phi (y, z); y = phi (x, z)
981static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
982 SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
983 // See if we already saw this PHI node.
984 if (!ValueEqualPHIs.insert(PN).second)
985 return true;
986
987 // Don't scan crazily complex things.
988 if (ValueEqualPHIs.size() >= 16)
989 return false;
990
991 // Scan the operands to see if they are either phi nodes or are equal to
992 // the value.
993 for (Value *Op : PN->incoming_values()) {
994 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
995 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
996 if (NonPhiInVal)
997 return false;
998 NonPhiInVal = OpPN;
999 }
1000 } else if (Op != NonPhiInVal)
1001 return false;
1002 }
1003
1004 return true;
1005}
1006
1007/// Return an existing non-zero constant if this phi node has one, otherwise
1008/// return constant 1.
// NOTE(review): the signature line (doxygen 1009, static ConstantInt *
// getAnyNonZeroConstInt(PHINode &PN)) is missing from this extraction.
1010 assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
// Prefer a constant-int operand that is already known to be non-zero...
1011 for (Value *V : PN.operands())
1012 if (auto *ConstVA = dyn_cast<ConstantInt>(V))
1013 if (!ConstVA->isZero())
1014 return ConstVA;
// ...otherwise fall back to the constant 1 of the phi's integer type.
1015 return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
1016}
1017
1018namespace {
// Describes one extraction (a trunc, or a trunc of an lshr) of a PHI being
// sliced by SliceUpIllegalIntegerPHI.
1019struct PHIUsageRecord {
1020 unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
1021 unsigned Shift; // The amount shifted.
1022 Instruction *Inst; // The trunc instruction.
1023
1024 PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
1025 : PHIId(Pn), Shift(Sh), Inst(User) {}
1026
// Order by PHI id, then shift amount, then result width, so that identical
// slices sort adjacently.
1027 bool operator<(const PHIUsageRecord &RHS) const {
1028 if (PHIId < RHS.PHIId) return true;
1029 if (PHIId > RHS.PHIId) return false;
1030 if (Shift < RHS.Shift) return true;
1031 if (Shift > RHS.Shift) return false;
1032 return Inst->getType()->getPrimitiveSizeInBits() <
// NOTE(review): doxygen line 1033 (the right-hand side of this comparison,
// RHS.Inst's primitive size in bits) is missing from this extraction.
1034 }
1035};
1036
// Identifies a PHI slice that has already been lowered: the source phi, the
// bit offset, and the extracted width.
1037struct LoweredPHIRecord {
1038 PHINode *PN; // The PHI that was lowered.
1039 unsigned Shift; // The amount shifted.
1040 unsigned Width; // The width extracted.
1041
1042 LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
1043 : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
1044
1045 // Ctor form used by DenseMap.
1046 LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
1047};
1048} // namespace
1049
1050template <> struct llvm::DenseMapInfo<LoweredPHIRecord> {
1051 static inline LoweredPHIRecord getEmptyKey() {
1052 return LoweredPHIRecord(nullptr, 0);
1053 }
1054 static inline LoweredPHIRecord getTombstoneKey() {
1055 return LoweredPHIRecord(nullptr, 1);
1056 }
1057 static unsigned getHashValue(const LoweredPHIRecord &Val) {
1058 return DenseMapInfo<PHINode *>::getHashValue(Val.PN) ^ (Val.Shift >> 3) ^
1059 (Val.Width >> 3);
1060 }
1061 static bool isEqual(const LoweredPHIRecord &LHS,
1062 const LoweredPHIRecord &RHS) {
1063 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift && LHS.Width == RHS.Width;
1064 }
1065};
1066
1067/// This is an integer PHI and we know that it has an illegal type: see if it is
1068/// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
1069/// the various pieces being extracted. This sort of thing is introduced when
1070/// SROA promotes an aggregate to large integer values.
1071///
1072/// TODO: The user of the trunc may be an bitcast to float/double/vector or an
1073/// inttoptr. We should produce new PHIs in the right type.
1074///
// NOTE(review): the signature line (doxygen 1075,
// InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi)) is missing
// from this extraction.
1076 // PHIUsers - Keep track of all of the truncated values extracted from a set
1077 // of PHIs, along with their offset. These are the things we want to rewrite.
// NOTE(review): doxygen line 1078 (the SmallVector<PHIUsageRecord> PHIUsers
// declaration) is missing from this extraction.
1079
1080 // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
1081 // nodes which are extracted from. PHIsInspected is a set we use to avoid
1082 // revisiting PHIs, PHIsToSlice is an ordered list of PHIs that we need to
1083 // check the uses of (to ensure they are all extracts).
1084 SmallVector<PHINode*, 8> PHIsToSlice;
1085 SmallPtrSet<PHINode*, 8> PHIsInspected;
1086
1087 PHIsToSlice.push_back(&FirstPhi);
1088 PHIsInspected.insert(&FirstPhi);
1089
// Worklist loop: PHIsToSlice grows as new user-phis are discovered below.
1090 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
1091 PHINode *PN = PHIsToSlice[PHIId];
1092
1093 // Scan the input list of the PHI. If any input is an invoke, and if the
1094 // input is defined in the predecessor, then we won't be split the critical
1095 // edge which is required to insert a truncate. Because of this, we have to
1096 // bail out.
1097 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1098 BasicBlock *BB = std::get<0>(Incoming);
1099 Value *V = std::get<1>(Incoming);
// NOTE(review): doxygen line 1100 (the dyn_cast of V to an InvokeInst *II)
// is missing from this extraction.
1101 if (!II)
1102 continue;
1103 if (II->getParent() != BB)
1104 continue;
1105
1106 // If we have a phi, and if it's directly in the predecessor, then we have
1107 // a critical edge where we need to put the truncate. Since we can't
1108 // split the edge in instcombine, we have to bail out.
1109 return nullptr;
1110 }
1111
1112 // If the incoming value is a PHI node before a catchswitch, we cannot
1113 // extract the value within that BB because we cannot insert any non-PHI
1114 // instructions in the BB.
1115 for (auto *Pred : PN->blocks())
1116 if (!Pred->hasInsertionPt())
1117 return nullptr;
1118
// Verify every user of the phi is a slice (trunc / lshr+trunc) or another
// phi; record each slice as a PHIUsageRecord.
1119 for (User *U : PN->users()) {
1120 Instruction *UserI = cast<Instruction>(U);
1121
1122 // If the user is a PHI, inspect its uses recursively.
1123 if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
1124 if (PHIsInspected.insert(UserPN).second)
1125 PHIsToSlice.push_back(UserPN);
1126 continue;
1127 }
1128
1129 // Truncates are always ok.
1130 if (isa<TruncInst>(UserI)) {
1131 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
1132 continue;
1133 }
1134
1135 // Otherwise it must be a lshr which can only be used by one trunc.
1136 if (UserI->getOpcode() != Instruction::LShr ||
1137 !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
1138 !isa<ConstantInt>(UserI->getOperand(1)))
1139 return nullptr;
1140
1141 // Bail on out of range shifts.
1142 unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
1143 if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
1144 return nullptr;
1145
1146 unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
1147 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
1148 }
1149 }
1150
1151 // If we have no users, they must be all self uses, just nuke the PHI.
1152 if (PHIUsers.empty())
1153 return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));
1154
1155 // If this phi node is transformable, create new PHIs for all the pieces
1156 // extracted out of it. First, sort the users by their offset and size.
1157 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
1158
1159 LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
1160 for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
1161 << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');
1162
1163 // PredValues - This is a temporary used when rewriting PHI nodes. It is
1164 // hoisted out here to avoid construction/destruction thrashing.
// NOTE(review): doxygen line 1165 (the PredValues map declaration, keyed by
// predecessor BasicBlock) is missing from this extraction.
1166
1167 // ExtractedVals - Each new PHI we introduce is saved here so we don't
1168 // introduce redundant PHIs.
// NOTE(review): doxygen line 1169 (the ExtractedVals
// DenseMap<LoweredPHIRecord, PHINode*> declaration) is missing from this
// extraction.
1170
// Note: indexed loop on purpose -- PHIUsers (and UserE) can grow while
// iterating when a rewritten incoming phi must be revisited (see below).
1171 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
1172 unsigned PHIId = PHIUsers[UserI].PHIId;
1173 PHINode *PN = PHIsToSlice[PHIId];
1174 unsigned Offset = PHIUsers[UserI].Shift;
1175 Type *Ty = PHIUsers[UserI].Inst->getType();
1176
1177 PHINode *EltPHI;
1178
1179 // If we've already lowered a user like this, reuse the previously lowered
1180 // value.
1181 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
1182
1183 // Otherwise, Create the new PHI node for this user.
1184 EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
1185 PN->getName() + ".off" + Twine(Offset),
1186 PN->getIterator());
1187 assert(EltPHI->getType() != PN->getType() &&
1188 "Truncate didn't shrink phi?");
1189
1190 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1191 BasicBlock *Pred = std::get<0>(Incoming);
1192 Value *InVal = std::get<1>(Incoming);
1193 Value *&PredVal = PredValues[Pred];
1194
1195 // If we already have a value for this predecessor, reuse it.
1196 if (PredVal) {
1197 EltPHI->addIncoming(PredVal, Pred);
1198 continue;
1199 }
1200
1201 // Handle the PHI self-reuse case.
1202 if (InVal == PN) {
1203 PredVal = EltPHI;
1204 EltPHI->addIncoming(PredVal, Pred);
1205 continue;
1206 }
1207
1208 // If the incoming value was a PHI, and if it was one of the PHIs we
1209 // already rewrote it, just use the lowered value.
1210 if (Value *Res = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) {
1211 PredVal = Res;
1212 EltPHI->addIncoming(PredVal, Pred);
1213 continue;
1214 }
1215
1216 // Otherwise, do an extract in the predecessor.
1217 Builder.SetInsertPoint(Pred->getTerminator());
1218 Value *Res = InVal;
1219 if (Offset)
1220 Res = Builder.CreateLShr(
1221 Res, ConstantInt::get(InVal->getType(), Offset), "extract");
1222 Res = Builder.CreateTrunc(Res, Ty, "extract.t");
1223 PredVal = Res;
1224 EltPHI->addIncoming(Res, Pred);
1225
1226 // If the incoming value was a PHI, and if it was one of the PHIs we are
1227 // rewriting, we will ultimately delete the code we inserted. This
1228 // means we need to revisit that PHI to make sure we extract out the
1229 // needed piece.
1230 if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
1231 if (PHIsInspected.count(OldInVal)) {
1232 unsigned RefPHIId =
1233 find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
1234 PHIUsers.push_back(
1235 PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
1236 ++UserE;
1237 }
1238 }
1239 PredValues.clear();
1240
1241 LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
1242 << *EltPHI << '\n');
1243 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
1244 }
1245
1246 // Replace the use of this piece with the PHI node.
1247 replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
1248 }
1249
1250 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
1251 // with poison.
1252 Value *Poison = PoisonValue::get(FirstPhi.getType());
1253 for (PHINode *PHI : drop_begin(PHIsToSlice))
// NOTE(review): doxygen line 1254 (the loop body replacing each sliced phi's
// uses with Poison) is missing from this extraction.
1255 return replaceInstUsesWith(FirstPhi, Poison);
1256}
1257
// NOTE(review): the first line of this function's signature (doxygen 1258,
// static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN, ...)
// is missing from this extraction.
1259 const DominatorTree &DT) {
1260 // Simplify the following patterns:
1261 // if (cond)
1262 // / \
1263 // ... ...
1264 // \ /
1265 // phi [true] [false]
1266 // and
1267 // switch (cond)
1268 // case v1: / \ case v2:
1269 // ... ...
1270 // \ /
1271 // phi [v1] [v2]
1272 // Make sure all inputs are constants.
// NOTE(review): doxygen line 1273 (the all_of check that every phi operand
// is a ConstantInt, guarding this early return) is missing from this
// extraction.
1274 return nullptr;
1275
1276 BasicBlock *BB = PN.getParent();
1277 // Do not bother with unreachable instructions.
1278 if (!DT.isReachableFromEntry(BB))
1279 return nullptr;
1280
1281 // Determine which value the condition of the idom has for which successor.
1282 LLVMContext &Context = PN.getContext();
1283 auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
1284 Value *Cond;
// NOTE(review): doxygen lines 1285-1286 (the SuccForValue and SuccCount map
// declarations used below) are missing from this extraction.
1287 auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
1288 SuccForValue[C] = Succ;
1289 ++SuccCount[Succ];
1290 };
// Record, per condition value, which idom successor is taken; SuccCount
// detects successors reachable via more than one edge/case.
1291 if (auto *BI = dyn_cast<CondBrInst>(IDom->getTerminator())) {
1292 Cond = BI->getCondition();
1293 AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
1294 AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
1295 } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
1296 Cond = SI->getCondition();
1297 ++SuccCount[SI->getDefaultDest()];
1298 for (auto Case : SI->cases())
1299 AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
1300 } else {
1301 return nullptr;
1302 }
1303
1304 if (Cond->getType() != PN.getType())
1305 return nullptr;
1306
1307 // Check that edges outgoing from the idom's terminators dominate respective
1308 // inputs of the Phi.
1309 std::optional<bool> Invert;
1310 for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
1311 auto *Input = cast<ConstantInt>(std::get<0>(Pair));
1312 BasicBlock *Pred = std::get<1>(Pair);
1313 auto IsCorrectInput = [&](ConstantInt *Input) {
1314 // The input needs to be dominated by the corresponding edge of the idom.
1315 // This edge cannot be a multi-edge, as that would imply that multiple
1316 // different condition values follow the same edge.
1317 auto It = SuccForValue.find(Input);
1318 return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
1319 DT.dominates(BasicBlockEdge(IDom, It->second),
1320 BasicBlockEdge(Pred, BB));
1321 };
1322
1323 // Depending on the constant, the condition may need to be inverted.
1324 bool NeedsInvert;
1325 if (IsCorrectInput(Input))
1326 NeedsInvert = false;
1327 else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
1328 NeedsInvert = true;
1329 else
1330 return nullptr;
1331
1332 // Make sure the inversion requirement is always the same.
1333 if (Invert && *Invert != NeedsInvert)
1334 return nullptr;
1335
1336 Invert = NeedsInvert;
1337 }
1338
1339 if (!*Invert)
1340 return Cond;
1341
1342 // This Phi is actually opposite to branching condition of IDom. We invert
1343 // the condition that will potentially open up some opportunities for
1344 // sinking.
1345 auto InsertPt = BB->getFirstInsertionPt();
1346 if (InsertPt != BB->end()) {
1347 Self.Builder.SetInsertPoint(&*BB, InsertPt);
1348 return Self.Builder.CreateNot(Cond);
1349 }
1350
1351 return nullptr;
1352}
1353
1354// Fold iv = phi(start, iv.next = iv2.next op start)
1355// where iv2 = phi(iv2.start, iv2.next = iv2 + iv2.step)
1356// and iv2.start op start = start
1357// to iv = iv2 op start
// NOTE(review): the signature line (doxygen 1358,
// static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder)) is
// missing from this extraction.
1359 BasicBlock *BB = PN.getParent();
1360 if (PN.getNumIncomingValues() != 2)
1361 return nullptr;
1362
// Match the outer IV: one incoming value is the start, the other is either
// a commutative binop or a GEP combining start with an inner-IV increment.
1363 Value *Start;
1364 Instruction *IvNext;
1365 BinaryOperator *Iv2Next;
1366 auto MatchOuterIV = [&](Value *V1, Value *V2) {
1367 if (match(V2, m_c_BinOp(m_Specific(V1), m_BinOp(Iv2Next))) ||
1368 match(V2, m_GEP(m_Specific(V1), m_BinOp(Iv2Next)))) {
1369 Start = V1;
1370 IvNext = cast<Instruction>(V2);
1371 return true;
1372 }
1373 return false;
1374 };
1375
// Try both orderings, since either incoming slot may hold the start value.
1376 if (!MatchOuterIV(PN.getIncomingValue(0), PN.getIncomingValue(1)) &&
1377 !MatchOuterIV(PN.getIncomingValue(1), PN.getIncomingValue(0)))
1378 return nullptr;
1379
// The inner IV must be a simple recurrence in the same block.
1380 PHINode *Iv2;
1381 Value *Iv2Start, *Iv2Step;
1382 if (!matchSimpleRecurrence(Iv2Next, Iv2, Iv2Start, Iv2Step) ||
1383 Iv2->getParent() != BB)
1384 return nullptr;
1385
// "iv2.start op start = start" holds when iv2 starts at the identity of the
// combining op (null for the GEP/offset case).
1386 auto *BO = dyn_cast<BinaryOperator>(IvNext);
1387 Constant *Identity =
1388 BO ? ConstantExpr::getBinOpIdentity(BO->getOpcode(), Iv2Start->getType())
1389 : Constant::getNullValue(Iv2Start->getType());
1390 if (Iv2Start != Identity)
1391 return nullptr;
1392
1393 Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
1394 if (!BO) {
1395 auto *GEP = cast<GEPOperator>(IvNext);
1396 return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
1397 cast<GEPOperator>(IvNext)->getNoWrapFlags());
1398 }
1399
1400 assert(BO->isCommutative() && "Must be commutative");
1401 Value *Res = Builder.CreateBinOp(BO->getOpcode(), Iv2, Start);
1402 cast<Instruction>(Res)->copyIRFlags(BO);
1403 return Res;
1404}
1405
1406// PHINode simplification
1407//
// NOTE(review): the signature line (doxygen 1408,
// Instruction *InstCombinerImpl::visitPHINode(PHINode &PN)) is missing from
// this extraction.
1409 if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
1410 return replaceInstUsesWith(PN, V);
1411
1412 if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
1413 return Result;
1414
1415 if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
1416 return Result;
1417
1418 // If all PHI operands are the same operation, pull them through the PHI,
1419 // reducing code size.
1420 auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
1421 auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
1422 if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
1423 Inst0->hasOneUser())
1424 if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
1425 return Result;
1426
1427 // If the incoming values are pointer casts of the same original value,
1428 // replace the phi with a single cast iff we can insert a non-PHI instruction.
1429 if (PN.getType()->isPointerTy() && PN.getParent()->hasInsertionPt()) {
1430 Value *IV0 = PN.getIncomingValue(0);
1431 Value *IV0Stripped = IV0->stripPointerCasts();
1432 // Set to keep track of values known to be equal to IV0Stripped after
1433 // stripping pointer casts.
1434 SmallPtrSet<Value *, 4> CheckedIVs;
1435 CheckedIVs.insert(IV0);
1436 if (IV0 != IV0Stripped &&
1437 all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
1438 return !CheckedIVs.insert(IV).second ||
1439 IV0Stripped == IV->stripPointerCasts();
1440 })) {
1441 return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
1442 }
1443 }
1444
1445 if (foldDeadPhiWeb(PN))
1446 return nullptr;
1447
1448 // Optimization when the phi only has one use
1449 if (PN.hasOneUse()) {
1450 if (foldIntegerTypedPHI(PN))
1451 return nullptr;
1452
1453 // If this phi has a single use, and if that use just computes a value for
1454 // the next iteration of a loop, delete the phi. This occurs with unused
1455 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
1456 // common case here is good because the only other things that catch this
1457 // are induction variable analysis (sometimes) and ADCE, which is only run
1458 // late.
1459 Instruction *PHIUser = cast<Instruction>(PN.user_back());
1460 if (PHIUser->hasOneUse() &&
1461 (isa<BinaryOperator>(PHIUser) || isa<UnaryOperator>(PHIUser) ||
1462 isa<GetElementPtrInst>(PHIUser)) &&
1463 PHIUser->user_back() == &PN) {
// NOTE(review): doxygen line 1464 (the body handling this dead phi/user
// cycle) is missing from this extraction -- verify against upstream.
1465 }
1466 }
1467
1468 // When a PHI is used only to be compared with zero, it is safe to replace
1469 // an incoming value proved as known nonzero with any non-zero constant.
1470 // For example, in the code below, the incoming value %v can be replaced
1471 // with any non-zero constant based on the fact that the PHI is only used to
1472 // be compared with zero and %v is a known non-zero value:
1473 // %v = select %cond, 1, 2
1474 // %p = phi [%v, BB] ...
1475 // icmp eq, %p, 0
1476 // FIXME: To be simple, handle only integer type for now.
1477 // This handles a small number of uses to keep the complexity down, and an
1478 // icmp(or(phi)) can equally be replaced with any non-zero constant as the
1479 // "or" will only add bits.
1480 if (!PN.hasNUsesOrMore(3)) {
1481 SmallVector<Instruction *> DropPoisonFlags;
1482 bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&](User *U) {
1483 auto *CmpInst = dyn_cast<ICmpInst>(U);
1484 if (!CmpInst) {
1485 // This is always correct as OR only add bits and we are checking
1486 // against 0.
1487 if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value()))) {
1488 DropPoisonFlags.push_back(cast<Instruction>(U));
1489 CmpInst = dyn_cast<ICmpInst>(U->user_back());
1490 }
1491 }
1492 if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
1493 !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
1494 return false;
1495 }
1496 return true;
1497 });
1498 // All uses of PHI results in a compare with zero.
1499 if (AllUsesOfPhiEndsInCmp) {
1500 ConstantInt *NonZeroConst = nullptr;
1501 bool MadeChange = false;
1502 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
// NOTE(review): doxygen line 1503 (the definition of CtxI, the context
// instruction used for the known-non-zero query below) is missing from this
// extraction.
1504 Value *VA = PN.getIncomingValue(I);
1505 if (isKnownNonZero(VA, getSimplifyQuery().getWithInstruction(CtxI))) {
1506 if (!NonZeroConst)
1507 NonZeroConst = getAnyNonZeroConstInt(PN);
1508 if (NonZeroConst != VA) {
1509 replaceOperand(PN, I, NonZeroConst);
1510 // The "disjoint" flag may no longer hold after the transform.
1511 for (Instruction *I : DropPoisonFlags)
1512 I->dropPoisonGeneratingFlags();
1513 MadeChange = true;
1514 }
1515 }
1516 }
1517 if (MadeChange)
1518 return &PN;
1519 }
1520 }
1521
1522 // We sometimes end up with phi cycles that non-obviously end up being the
1523 // same value, for example:
1524 // z = some value; x = phi (y, z); y = phi (x, z)
1525 // where the phi nodes don't necessarily need to be in the same block. Do a
1526 // quick check to see if the PHI node only contains a single non-phi value, if
1527 // so, scan to see if the phi cycle is actually equal to that value. If the
1528 // phi has no non-phi values then allow the "NonPhiInVal" to be set later if
1529 // one of the phis itself does not have a single input.
1530 {
1531 unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
1532 // Scan for the first non-phi operand.
1533 while (InValNo != NumIncomingVals &&
1534 isa<PHINode>(PN.getIncomingValue(InValNo)))
1535 ++InValNo;
1536
1537 Value *NonPhiInVal =
1538 InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;
1539
1540 // Scan the rest of the operands to see if there are any conflicts, if so
1541 // there is no need to recursively scan other phis.
1542 if (NonPhiInVal)
1543 for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
1544 Value *OpVal = PN.getIncomingValue(InValNo);
1545 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
1546 break;
1547 }
1548
1549 // If we scanned over all operands, then we have one unique value plus
1550 // phi values. Scan PHI nodes to see if they all merge in each other or
1551 // the value.
1552 if (InValNo == NumIncomingVals) {
1553 SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
1554 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
1555 return replaceInstUsesWith(PN, NonPhiInVal);
1556 }
1557 }
1558
1559 // If there are multiple PHIs, sort their operands so that they all list
1560 // the blocks in the same order. This will help identical PHIs be eliminated
1561 // by other passes. Other passes shouldn't depend on this for correctness
1562 // however.
1563 auto Res = PredOrder.try_emplace(PN.getParent());
1564 if (!Res.second) {
1565 const auto &Preds = Res.first->second;
1566 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1567 BasicBlock *BBA = PN.getIncomingBlock(I);
1568 BasicBlock *BBB = Preds[I];
1569 if (BBA != BBB) {
// Swap entries I and J so slot I matches the remembered block order.
1570 Value *VA = PN.getIncomingValue(I);
1571 unsigned J = PN.getBasicBlockIndex(BBB);
1572 Value *VB = PN.getIncomingValue(J);
1573 PN.setIncomingBlock(I, BBB);
1574 PN.setIncomingValue(I, VB);
1575 PN.setIncomingBlock(J, BBA);
1576 PN.setIncomingValue(J, VA);
1577 // NOTE: Instcombine normally would want us to "return &PN" if we
1578 // modified any of the operands of an instruction. However, since we
1579 // aren't adding or removing uses (just rearranging them) we don't do
1580 // this in this case.
1581 }
1582 }
1583 } else {
1584 // Remember the block order of the first encountered phi node.
1585 append_range(Res.first->second, PN.blocks());
1586 }
1587
1588 // Is there an identical PHI node in this basic block?
1589 for (PHINode &IdenticalPN : PN.getParent()->phis()) {
1590 // Ignore the PHI node itself.
1591 if (&IdenticalPN == &PN)
1592 continue;
1593 // Note that even though we've just canonicalized this PHI, due to the
1594 // worklist visitation order, there are no guarantees that *every* PHI
1595 // has been canonicalized, so we can't just compare operands ranges.
1596 if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
1597 continue;
1598 // Just use that PHI instead then.
1599 ++NumPHICSEs;
1600 return replaceInstUsesWith(PN, &IdenticalPN);
1601 }
1602
1603 // If this is an integer PHI and we know that it has an illegal type, see if
1604 // it is only used by trunc or trunc(lshr) operations. If so, we split the
1605 // PHI into the various pieces being extracted. This sort of thing is
1606 // introduced when SROA promotes an aggregate to a single large integer type.
1607 if (PN.getType()->isIntegerTy() &&
1608 !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1609 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1610 return Res;
1611
1612 // Ultimately, try to replace this Phi with a dominating condition.
1613 if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
1614 return replaceInstUsesWith(PN, V);
1615
1616 if (Value *Res = foldDependentIVs(PN, Builder))
1617 return replaceInstUsesWith(PN, Res);
1618
1619 return nullptr;
1620}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Hexagon Common GEP
This file provides internal interfaces used to implement the InstCombine.
static ConstantInt * getAnyNonZeroConstInt(PHINode &PN)
Return an existing non-zero constant if this phi node has one, otherwise return constant 1.
static Value * foldDependentIVs(PHINode &PN, IRBuilderBase &Builder)
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it.
static Value * simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN, const DominatorTree &DT)
static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal, SmallPtrSetImpl< PHINode * > &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
static cl::opt< unsigned > MaxNumPhis("instcombine-max-num-phis", cl::init(512), cl::desc("Maximum number phis to handle in intptr/ptrint folding"))
This file provides the interface for the instcombine pass implementation.
#define I(x, y, z)
Definition MD5.cpp:57
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
The Input class is used to parse a yaml document into in-memory structs and vectors.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:474
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
static LLVM_ABI CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static LLVM_ABI bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
OtherOps getOpcode() const
Get the opcode casted to the right type.
Definition InstrTypes.h:760
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
static DebugLoc getDropped()
Definition DebugLoc.h:163
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Represents flags for the getelementptr instruction/expression.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Type * getSourceElementType() const
LLVM_ABI GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1849
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)], turn this into a phi[a,...
Instruction * foldPHIArgBinOpIntoPHI(PHINode &PN)
If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the adds all have a single user,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitPHINode(PHINode &PN)
Instruction * foldPHIArgOpIntoPHI(PHINode &PN)
Try to rotate an operation below a PHI node, using PHI nodes for its operands.
Instruction * foldPHIArgZextsIntoPHI(PHINode &PN)
TODO: This function could handle other cast types, but then it might require special-casing a cast fr...
Instruction * foldPHIArgLoadIntoPHI(PHINode &PN)
bool foldIntegerTypedPHI(PHINode &PN)
If an integer typed PHI has only one use which is an IntToPtr operation, replace the PHI with an exis...
bool foldDeadPhiWeb(PHINode &PN)
If the phi is within a phi web, which is formed by the def-use chain of phis and all the phis in the ...
Instruction * foldPHIArgIntToPtrToPHI(PHINode &PN)
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
Instruction * foldPHIArgGEPIntoPHI(PHINode &PN)
void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN)
Helper function for FoldPHIArgXIntoPHI() to set debug location for the folded operation.
Instruction * foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [extractvalue(a,0), extractvalue(b,0)], turn this into a phi[a,...
The core instruction combiner logic.
SimplifyQuery SQ
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
const DataLayout & DL
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
BuilderTy & Builder
const SimplifyQuery & getSimplifyQuery() const
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
LLVM_ABI void applyMergedLocation(DebugLoc LocA, DebugLoc LocB)
Merge 2 debug locations and apply it to the Instruction.
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
bool isSimple() const
Align getAlign() const
Return the alignment of the access that is being performed.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
void setOperand(unsigned i, Value *Val)
Definition User.h:212
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition Value.cpp:154
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
auto m_GEP(const OperandTypes &...Ops)
Matches GetElementPtrInst.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
bool operator<(int64_t V1, const APSInt &V2)
Definition APSInt.h:360
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition Local.cpp:3110
LLVM_ABI bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition Local.cpp:3899
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition STLExtras.h:1596
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
static unsigned getHashValue(const LoweredPHIRecord &Val)
static LoweredPHIRecord getTombstoneKey()
An information struct used to provide DenseMap with the various necessary components for a given valu...