1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Utils/Local.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DebugLoc.h"
48#include "llvm/IR/Dominators.h"
50#include "llvm/IR/Function.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/IntrinsicsWebAssembly.h"
60#include "llvm/IR/LLVMContext.h"
61#include "llvm/IR/MDBuilder.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/Module.h"
67#include "llvm/IR/Type.h"
68#include "llvm/IR/Use.h"
69#include "llvm/IR/User.h"
70#include "llvm/IR/Value.h"
71#include "llvm/IR/ValueHandle.h"
74#include "llvm/Support/Debug.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91extern cl::opt<bool> UseNewDbgInfoFormat;
92
93#define DEBUG_TYPE "local"
94
95STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
96STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
97
99 "phicse-debug-hash",
100#ifdef EXPENSIVE_CHECKS
101 cl::init(true),
102#else
103 cl::init(false),
104#endif
105 cl::Hidden,
106 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
107 "function is well-behaved w.r.t. its isEqual predicate"));
108
110 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
111 cl::desc(
112 "When the basic block contains not more than this number of PHI nodes, "
113 "perform a (faster!) exhaustive search instead of set-driven one."));
114
115// Max recursion depth for collectBitParts used when detecting bswap and
116// bitreverse idioms.
117static const unsigned BitPartRecursionMaxDepth = 48;
118
119//===----------------------------------------------------------------------===//
120// Local constant propagation.
121//
122
123/// ConstantFoldTerminator - If a terminator instruction is predicated on a
124/// constant value, convert it into an unconditional branch to the constant
125/// destination. This is a nontrivial operation because the successors of this
126/// basic block must have their PHI nodes updated.
127/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
128/// conditions and indirectbr addresses this might make dead if
129/// DeleteDeadConditions is true.
130bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
131 const TargetLibraryInfo *TLI,
132 DomTreeUpdater *DTU) {
133 Instruction *T = BB->getTerminator();
134 IRBuilder<> Builder(T);
135
136 // Branch - See if we are conditional jumping on constant
137 if (auto *BI = dyn_cast<BranchInst>(T)) {
138 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
139
140 BasicBlock *Dest1 = BI->getSuccessor(0);
141 BasicBlock *Dest2 = BI->getSuccessor(1);
142
143 if (Dest2 == Dest1) { // Conditional branch to same location?
144 // This branch matches something like this:
145 // br bool %cond, label %Dest, label %Dest
146 // and changes it into: br label %Dest
147
148 // Let the basic block know that we are letting go of one copy of it.
149 assert(BI->getParent() && "Terminator not inserted in block!");
150 Dest1->removePredecessor(BI->getParent());
151
152 // Replace the conditional branch with an unconditional one.
153 BranchInst *NewBI = Builder.CreateBr(Dest1);
154
155 // Transfer the metadata to the new branch instruction.
156 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
157 LLVMContext::MD_annotation});
158
159 Value *Cond = BI->getCondition();
160 BI->eraseFromParent();
161 if (DeleteDeadConditions)
162 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
163 return true;
164 }
165
166 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
167 // Are we branching on constant?
168 // YES. Change to unconditional branch...
169 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
170 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
171
172 // Let the basic block know that we are letting go of it. Based on this,
173 // it will adjust its PHI nodes.
174 OldDest->removePredecessor(BB);
175
176 // Replace the conditional branch with an unconditional one.
177 BranchInst *NewBI = Builder.CreateBr(Destination);
178
179 // Transfer the metadata to the new branch instruction.
180 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
181 LLVMContext::MD_annotation});
182
183 BI->eraseFromParent();
184 if (DTU)
185 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
186 return true;
187 }
188
189 return false;
190 }
191
192 if (auto *SI = dyn_cast<SwitchInst>(T)) {
193 // If we are switching on a constant, we can convert the switch to an
194 // unconditional branch.
195 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
196 BasicBlock *DefaultDest = SI->getDefaultDest();
197 BasicBlock *TheOnlyDest = DefaultDest;
198
199 // If the default is unreachable, ignore it when searching for TheOnlyDest.
200 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
201 SI->getNumCases() > 0) {
202 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
203 }
204
205 bool Changed = false;
206
207 // Figure out which case it goes to.
208 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
209 // Found case matching a constant operand?
210 if (It->getCaseValue() == CI) {
211 TheOnlyDest = It->getCaseSuccessor();
212 break;
213 }
214
215 // Check to see if this branch is going to the same place as the default
216 // dest. If so, eliminate it as an explicit compare.
217 if (It->getCaseSuccessor() == DefaultDest) {
218 MDNode *MD = getValidBranchWeightMDNode(*SI);
219 unsigned NCases = SI->getNumCases();
220 // Fold the case metadata into the default if there will be any branches
221 // left, unless the metadata doesn't match the switch.
222 if (NCases > 1 && MD) {
223 // Collect branch weights into a vector.
224 SmallVector<uint32_t, 8> Weights;
225 extractBranchWeights(MD, Weights);
226
227 // Merge weight of this case to the default weight.
228 unsigned Idx = It->getCaseIndex();
229 // TODO: Add overflow check.
230 Weights[0] += Weights[Idx + 1];
231 // Remove weight for this case.
232 std::swap(Weights[Idx + 1], Weights.back());
233 Weights.pop_back();
234 setBranchWeights(*SI, Weights);
235 }
236 // Remove this entry.
237 BasicBlock *ParentBB = SI->getParent();
238 DefaultDest->removePredecessor(ParentBB);
239 It = SI->removeCase(It);
240 End = SI->case_end();
241
242 // Removing this case may have made the condition constant. In that
243 // case, update CI and restart iteration through the cases.
244 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
245 CI = NewCI;
246 It = SI->case_begin();
247 }
248
249 Changed = true;
250 continue;
251 }
252
253 // Otherwise, check to see if the switch only branches to one destination.
254 // We do this by resetting "TheOnlyDest" to null when we find two non-equal
255 // destinations.
256 if (It->getCaseSuccessor() != TheOnlyDest)
257 TheOnlyDest = nullptr;
258
259 // Increment this iterator as we haven't removed the case.
260 ++It;
261 }
262
263 if (CI && !TheOnlyDest) {
264 // Branching on a constant, but not any of the cases, go to the default
265 // successor.
266 TheOnlyDest = SI->getDefaultDest();
267 }
268
269 // If we found a single destination that we can fold the switch into, do so
270 // now.
271 if (TheOnlyDest) {
272 // Insert the new branch.
273 Builder.CreateBr(TheOnlyDest);
274 BasicBlock *BB = SI->getParent();
275
276 SmallSet<BasicBlock *, 8> RemovedSuccessors;
277
278 // Remove entries from PHI nodes which we no longer branch to...
279 BasicBlock *SuccToKeep = TheOnlyDest;
280 for (BasicBlock *Succ : successors(SI)) {
281 if (DTU && Succ != TheOnlyDest)
282 RemovedSuccessors.insert(Succ);
283 // Found case matching a constant operand?
284 if (Succ == SuccToKeep) {
285 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
286 } else {
287 Succ->removePredecessor(BB);
288 }
289 }
290
291 // Delete the old switch.
292 Value *Cond = SI->getCondition();
293 SI->eraseFromParent();
294 if (DeleteDeadConditions)
295 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
296 if (DTU) {
297 std::vector<DominatorTree::UpdateType> Updates;
298 Updates.reserve(RemovedSuccessors.size());
299 for (auto *RemovedSuccessor : RemovedSuccessors)
300 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
301 DTU->applyUpdates(Updates);
302 }
303 return true;
304 }
305
306 if (SI->getNumCases() == 1) {
307 // Otherwise, we can fold this switch into a conditional branch
308 // instruction if it has only one non-default destination.
309 auto FirstCase = *SI->case_begin();
310 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
311 FirstCase.getCaseValue(), "cond");
312
313 // Insert the new branch.
314 BranchInst *NewBr = Builder.CreateCondBr(Cond,
315 FirstCase.getCaseSuccessor(),
316 SI->getDefaultDest());
317 SmallVector<uint32_t> Weights;
318 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
319 uint32_t DefWeight = Weights[0];
320 uint32_t CaseWeight = Weights[1];
321 // The TrueWeight should be the weight for the single case of SI.
322 NewBr->setMetadata(LLVMContext::MD_prof,
323 MDBuilder(BB->getContext())
324 .createBranchWeights(CaseWeight, DefWeight));
325 }
326
327 // Update make.implicit metadata to the newly-created conditional branch.
328 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
329 if (MakeImplicitMD)
330 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
331
332 // Delete the old switch.
333 SI->eraseFromParent();
334 return true;
335 }
336 return Changed;
337 }
338
339 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
340 // indirectbr blockaddress(@F, @BB) -> br label @BB
341 if (auto *BA =
342 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
343 BasicBlock *TheOnlyDest = BA->getBasicBlock();
344 SmallSet<BasicBlock *, 8> RemovedSuccessors;
345
346 // Insert the new branch.
347 Builder.CreateBr(TheOnlyDest);
348
349 BasicBlock *SuccToKeep = TheOnlyDest;
350 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
351 BasicBlock *DestBB = IBI->getDestination(i);
352 if (DTU && DestBB != TheOnlyDest)
353 RemovedSuccessors.insert(DestBB);
354 if (IBI->getDestination(i) == SuccToKeep) {
355 SuccToKeep = nullptr;
356 } else {
357 DestBB->removePredecessor(BB);
358 }
359 }
360 Value *Address = IBI->getAddress();
361 IBI->eraseFromParent();
362 if (DeleteDeadConditions)
363 // Delete pointer cast instructions.
364 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
365
366 // Also zap the blockaddress constant if there are no users remaining,
367 // otherwise the destination is still marked as having its address taken.
368 if (BA->use_empty())
369 BA->destroyConstant();
370
371 // If we didn't find our destination in the IBI successor list, then we
372 // have undefined behavior. Replace the unconditional branch with an
373 // 'unreachable' instruction.
374 if (SuccToKeep) {
375 BB->getTerminator()->eraseFromParent();
376 new UnreachableInst(BB->getContext(), BB);
377 }
378
379 if (DTU) {
380 std::vector<DominatorTree::UpdateType> Updates;
381 Updates.reserve(RemovedSuccessors.size());
382 for (auto *RemovedSuccessor : RemovedSuccessors)
383 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
384 DTU->applyUpdates(Updates);
385 }
386 return true;
387 }
388 }
389
390 return false;
391}
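// Illustrative usage sketch (not part of the upstream file): a cleanup pass
// would typically run this over every block and let it rewrite constant
// branches and switches, e.g.
//
//   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
//   bool Changed = false;
//   for (BasicBlock &BB : F)
//     Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true,
//                                       /*TLI=*/nullptr, &DTU);
//
// Both TLI and DTU may be null; they only enable extra dead-condition cleanup
// and incremental dominator-tree maintenance.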
392
393//===----------------------------------------------------------------------===//
394// Local dead code elimination.
395//
396
397/// isInstructionTriviallyDead - Return true if the result produced by the
398/// instruction is not used, and the instruction has no side effects.
399///
400bool llvm::isInstructionTriviallyDead(Instruction *I,
401 const TargetLibraryInfo *TLI) {
402 if (!I->use_empty())
403 return false;
404 return wouldInstructionBeTriviallyDead(I, TLI);
405}
406
407bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
408 Instruction *I, const TargetLibraryInfo *TLI) {
409 // Instructions that are "markers" and have implied meaning on code around
410 // them (without explicit uses), are not dead on unused paths.
411 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
412 if (II->getIntrinsicID() == Intrinsic::stacksave ||
413 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
414 II->isLifetimeStartOrEnd())
415 return false;
416 return wouldInstructionBeTriviallyDead(I, TLI);
417}
418
419bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
420 const TargetLibraryInfo *TLI) {
421 if (I->isTerminator())
422 return false;
423
424 // We don't want the landingpad-like instructions removed by anything this
425 // general.
426 if (I->isEHPad())
427 return false;
428
429 // We don't want debug info removed by anything this general.
430 if (isa<DbgVariableIntrinsic>(I))
431 return false;
432
433 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
434 if (DLI->getLabel())
435 return false;
436 return true;
437 }
438
439 if (auto *CB = dyn_cast<CallBase>(I))
440 if (isRemovableAlloc(CB, TLI))
441 return true;
442
443 if (!I->willReturn()) {
444 auto *II = dyn_cast<IntrinsicInst>(I);
445 if (!II)
446 return false;
447
448 switch (II->getIntrinsicID()) {
449 case Intrinsic::experimental_guard: {
450 // Guards on true are operationally no-ops. In the future we can
451 // consider more sophisticated tradeoffs for guards considering potential
452 // for check widening, but for now we keep things simple.
453 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
454 return Cond && Cond->isOne();
455 }
456 // TODO: These intrinsics are not safe to remove, because this may remove
457 // a well-defined trap.
458 case Intrinsic::wasm_trunc_signed:
459 case Intrinsic::wasm_trunc_unsigned:
460 case Intrinsic::ptrauth_auth:
461 case Intrinsic::ptrauth_resign:
462 return true;
463 default:
464 return false;
465 }
466 }
467
468 if (!I->mayHaveSideEffects())
469 return true;
470
471 // Special case intrinsics that "may have side effects" but can be deleted
472 // when dead.
473 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
474 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
475 if (II->getIntrinsicID() == Intrinsic::stacksave ||
476 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
477 return true;
478
479 // Intrinsics declare sideeffects to prevent them from moving, but they are
480 // nops without users.
481 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
482 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
483 return true;
484
485 if (II->isLifetimeStartOrEnd()) {
486 auto *Arg = II->getArgOperand(1);
487 // Lifetime intrinsics are dead when their right-hand is undef.
488 if (isa<UndefValue>(Arg))
489 return true;
490 // If the right-hand is an alloc, global, or argument and the only uses
491 // are lifetime intrinsics then the intrinsics are dead.
492 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
493 return llvm::all_of(Arg->uses(), [](Use &Use) {
494 if (IntrinsicInst *IntrinsicUse =
495 dyn_cast<IntrinsicInst>(Use.getUser()))
496 return IntrinsicUse->isLifetimeStartOrEnd();
497 return false;
498 });
499 return false;
500 }
501
502 // Assumptions are dead if their condition is trivially true.
503 if (II->getIntrinsicID() == Intrinsic::assume &&
504 isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
505 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
506 return !Cond->isZero();
507
508 return false;
509 }
510
511 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
512 std::optional<fp::ExceptionBehavior> ExBehavior =
513 FPI->getExceptionBehavior();
514 return *ExBehavior != fp::ebStrict;
515 }
516 }
517
518 if (auto *Call = dyn_cast<CallBase>(I)) {
519 if (Value *FreedOp = getFreedOperand(Call, TLI))
520 if (Constant *C = dyn_cast<Constant>(FreedOp))
521 return C->isNullValue() || isa<UndefValue>(C);
522 if (isMathLibCallNoop(Call, TLI))
523 return true;
524 }
525
526 // Non-volatile atomic loads from constants can be removed.
527 if (auto *LI = dyn_cast<LoadInst>(I))
528 if (auto *GV = dyn_cast<GlobalVariable>(
529 LI->getPointerOperand()->stripPointerCasts()))
530 if (!LI->isVolatile() && GV->isConstant())
531 return true;
532
533 return false;
534}
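// Illustrative sketch (not part of the upstream file): the common client-side
// pattern built on these predicates is a simple erase loop, e.g.
//
//   for (Instruction &I : llvm::make_early_inc_range(BB)) {
//     if (isInstructionTriviallyDead(&I, &TLI)) {
//       salvageDebugInfo(I);
//       I.eraseFromParent();
//     }
//   }
//
// wouldInstructionBeTriviallyDead answers the same question while ignoring
// remaining uses, which is what callers need when they plan to drop the uses
// themselves.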
535
536/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
537/// trivially dead instruction, delete it. If that makes any of its operands
538/// trivially dead, delete them too, recursively. Return true if any
539/// instructions were deleted.
540bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
541 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
542 std::function<void(Value *)> AboutToDeleteCallback) {
543 Instruction *I = dyn_cast<Instruction>(V);
544 if (!I || !isInstructionTriviallyDead(I, TLI))
545 return false;
546
547 SmallVector<WeakTrackingVH, 16> DeadInsts;
548 DeadInsts.push_back(I);
549 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
550 AboutToDeleteCallback);
551
552 return true;
553}
554
555bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
556 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
557 MemorySSAUpdater *MSSAU,
558 std::function<void(Value *)> AboutToDeleteCallback) {
559 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
560 for (; S != E; ++S) {
561 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
562 if (!I || !isInstructionTriviallyDead(I)) {
563 DeadInsts[S] = nullptr;
564 ++Alive;
565 }
566 }
567 if (Alive == E)
568 return false;
569 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
570 AboutToDeleteCallback);
571 return true;
572}
573
574void llvm::RecursivelyDeleteTriviallyDeadInstructions(
575 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
576 MemorySSAUpdater *MSSAU,
577 std::function<void(Value *)> AboutToDeleteCallback) {
578 // Process the dead instruction list until empty.
579 while (!DeadInsts.empty()) {
580 Value *V = DeadInsts.pop_back_val();
581 Instruction *I = cast_or_null<Instruction>(V);
582 if (!I)
583 continue;
585 "Live instruction found in dead worklist!");
586 assert(I->use_empty() && "Instructions with uses are not dead.");
587
588 // Don't lose the debug info while deleting the instructions.
589 salvageDebugInfo(*I);
590
591 if (AboutToDeleteCallback)
592 AboutToDeleteCallback(I);
593
594 // Null out all of the instruction's operands to see if any operand becomes
595 // dead as we go.
596 for (Use &OpU : I->operands()) {
597 Value *OpV = OpU.get();
598 OpU.set(nullptr);
599
600 if (!OpV->use_empty())
601 continue;
602
603 // If the operand is an instruction that became dead as we nulled out the
604 // operand, and if it is 'trivially' dead, delete it in a future loop
605 // iteration.
606 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
607 if (isInstructionTriviallyDead(OpI, TLI))
608 DeadInsts.push_back(OpI);
609 }
610 if (MSSAU)
611 MSSAU->removeMemoryAccess(I);
612
613 I->eraseFromParent();
614 }
615}
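// Illustrative sketch (not part of the upstream file): callers normally gather
// candidates as weak handles so that deletions triggered by earlier entries
// cannot leave dangling pointers in the worklist, e.g.
//
//   SmallVector<WeakTrackingVH, 16> DeadInsts;
//   DeadInsts.push_back(SomeInst);
//   RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI);
//
// The "Permissive" variant above first filters out entries that are null or
// no longer trivially dead before doing the recursive deletion.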
616
617bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
618 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
619 SmallVector<DbgVariableRecord *, 1> DPUsers;
620 findDbgUsers(DbgUsers, I, &DPUsers);
621 for (auto *DII : DbgUsers)
622 DII->setKillLocation();
623 for (auto *DVR : DPUsers)
624 DVR->setKillLocation();
625 return !DbgUsers.empty() || !DPUsers.empty();
626}
627
628/// areAllUsesEqual - Check whether the uses of a value are all the same.
629/// This is similar to Instruction::hasOneUse() except this will also return
630/// true when there are no uses or multiple uses that all refer to the same
631/// value.
632static bool areAllUsesEqual(Instruction *I) {
633 Value::user_iterator UI = I->user_begin();
634 Value::user_iterator UE = I->user_end();
635 if (UI == UE)
636 return true;
637
638 User *TheUse = *UI;
639 for (++UI; UI != UE; ++UI) {
640 if (*UI != TheUse)
641 return false;
642 }
643 return true;
644}
645
646/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
647/// dead PHI node, due to being a def-use chain of single-use nodes that
648/// either forms a cycle or is terminated by a trivially dead instruction,
649/// delete it. If that makes any of its operands trivially dead, delete them
650/// too, recursively. Return true if a change was made.
651bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
652 const TargetLibraryInfo *TLI,
653 llvm::MemorySSAUpdater *MSSAU) {
654 SmallPtrSet<Instruction *, 4> Visited;
655 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
656 I = cast<Instruction>(*I->user_begin())) {
657 if (I->use_empty())
658 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
659
660 // If we find an instruction more than once, we're on a cycle that
661 // won't prove fruitful.
662 if (!Visited.insert(I).second) {
663 // Break the cycle and delete the instruction and its operands.
664 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
665 RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
666 return true;
667 }
668 }
669 return false;
670}
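// Illustrative sketch (not part of the upstream file): this is typically used
// after transformations that can leave a PHI feeding only itself through a
// chain of single-use instructions, e.g.
//
//   if (PHINode *PN = dyn_cast<PHINode>(&BB.front()))
//     Changed |= RecursivelyDeleteDeadPHINode(PN, &TLI);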
671
672static bool
673simplifyAndDCEInstruction(Instruction *I,
674 SmallSetVector<Instruction *, 16> &WorkList,
675 const DataLayout &DL,
676 const TargetLibraryInfo *TLI) {
677 if (isInstructionTriviallyDead(I, TLI)) {
678 salvageDebugInfo(*I);
679
680 // Null out all of the instruction's operands to see if any operand becomes
681 // dead as we go.
682 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
683 Value *OpV = I->getOperand(i);
684 I->setOperand(i, nullptr);
685
686 if (!OpV->use_empty() || I == OpV)
687 continue;
688
689 // If the operand is an instruction that became dead as we nulled out the
690 // operand, and if it is 'trivially' dead, delete it in a future loop
691 // iteration.
692 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
693 if (isInstructionTriviallyDead(OpI, TLI))
694 WorkList.insert(OpI);
695 }
696
697 I->eraseFromParent();
698
699 return true;
700 }
701
702 if (Value *SimpleV = simplifyInstruction(I, DL)) {
703 // Add the users to the worklist. CAREFUL: an instruction can use itself,
704 // in the case of a phi node.
705 for (User *U : I->users()) {
706 if (U != I) {
707 WorkList.insert(cast<Instruction>(U));
708 }
709 }
710
711 // Replace the instruction with its simplified value.
712 bool Changed = false;
713 if (!I->use_empty()) {
714 I->replaceAllUsesWith(SimpleV);
715 Changed = true;
716 }
717 if (isInstructionTriviallyDead(I, TLI)) {
718 I->eraseFromParent();
719 Changed = true;
720 }
721 return Changed;
722 }
723 return false;
724}
725
726/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
727/// simplify any instructions in it and recursively delete dead instructions.
728///
729/// This returns true if it changed the code, note that it can delete
730/// instructions in other blocks as well as in this block.
731bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
732 const TargetLibraryInfo *TLI) {
733 bool MadeChange = false;
734 const DataLayout &DL = BB->getModule()->getDataLayout();
735
736#ifndef NDEBUG
737 // In debug builds, ensure that the terminator of the block is never replaced
738 // or deleted by these simplifications. The idea of simplification is that it
739 // cannot introduce new instructions, and there is no way to replace the
740 // terminator of a block without introducing a new instruction.
741 AssertingVH<Instruction> TerminatorVH(&BB->back());
742#endif
743
744 SmallSetVector<Instruction *, 16> WorkList;
745 // Iterate over the original function, only adding insts to the worklist
746 // if they actually need to be revisited. This avoids having to pre-init
747 // the worklist with the entire function's worth of instructions.
748 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
749 BI != E;) {
750 assert(!BI->isTerminator());
751 Instruction *I = &*BI;
752 ++BI;
753
754 // We're visiting this instruction now, so make sure it's not in the
755 // worklist from an earlier visit.
756 if (!WorkList.count(I))
757 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
758 }
759
760 while (!WorkList.empty()) {
761 Instruction *I = WorkList.pop_back_val();
762 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
763 }
764 return MadeChange;
765}
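// Illustrative sketch (not part of the upstream file): a typical call site
// simply folds the result into the pass's change flag, e.g.
//
//   for (BasicBlock &BB : F)
//     Changed |= SimplifyInstructionsInBlock(&BB, &TLI);
//
// Note that simplifying one block may delete now-dead instructions in other
// blocks, so iteration over the function must tolerate that.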
766
767//===----------------------------------------------------------------------===//
768// Control Flow Graph Restructuring.
769//
770
771void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
772 DomTreeUpdater *DTU) {
773
774 // If BB has single-entry PHI nodes, fold them.
775 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
776 Value *NewVal = PN->getIncomingValue(0);
777 // Replace self referencing PHI with poison, it must be dead.
778 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
779 PN->replaceAllUsesWith(NewVal);
780 PN->eraseFromParent();
781 }
782
783 BasicBlock *PredBB = DestBB->getSinglePredecessor();
784 assert(PredBB && "Block doesn't have a single predecessor!");
785
786 bool ReplaceEntryBB = PredBB->isEntryBlock();
787
788 // DTU updates: Collect all the edges that enter
789 // PredBB. These dominator edges will be redirected to DestBB.
790 SmallVector<DominatorTree::UpdateType, 32> Updates;
791
792 if (DTU) {
793 // To avoid processing the same predecessor more than once.
794 SmallPtrSet<BasicBlock *, 2> SeenPreds;
795 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
796 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
797 // This predecessor of PredBB may already have DestBB as a successor.
798 if (PredOfPredBB != PredBB)
799 if (SeenPreds.insert(PredOfPredBB).second)
800 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
801 SeenPreds.clear();
802 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
803 if (SeenPreds.insert(PredOfPredBB).second)
804 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
805 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
806 }
807
808 // Zap anything that took the address of DestBB. Not doing this will give the
809 // address an invalid value.
810 if (DestBB->hasAddressTaken()) {
811 BlockAddress *BA = BlockAddress::get(DestBB);
812 Constant *Replacement =
813 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
814 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
815 BA->getType()));
816 BA->destroyConstant();
817 }
818
819 // Anything that branched to PredBB now branches to DestBB.
820 PredBB->replaceAllUsesWith(DestBB);
821
822 // Splice all the instructions from PredBB to DestBB.
823 PredBB->getTerminator()->eraseFromParent();
824 DestBB->splice(DestBB->begin(), PredBB);
825 new UnreachableInst(PredBB->getContext(), PredBB);
826
827 // If the PredBB is the entry block of the function, move DestBB up to
828 // become the entry block after we erase PredBB.
829 if (ReplaceEntryBB)
830 DestBB->moveAfter(PredBB);
831
832 if (DTU) {
833 assert(PredBB->size() == 1 &&
834 isa<UnreachableInst>(PredBB->getTerminator()) &&
835 "The successor list of PredBB isn't empty before "
836 "applying corresponding DTU updates.");
837 DTU->applyUpdatesPermissive(Updates);
838 DTU->deleteBB(PredBB);
839 // Recalculation of DomTree is needed when updating a forward DomTree and
840 // the Entry BB is replaced.
841 if (ReplaceEntryBB && DTU->hasDomTree()) {
842 // The entry block was removed and there is no external interface for
843 // the dominator tree to be notified of this change. In this corner-case
844 // we recalculate the entire tree.
845 DTU->recalculate(*(DestBB->getParent()));
846 }
847 }
848
849 else {
850 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
851 }
852}
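// Illustrative sketch (not part of the upstream file): callers are expected to
// establish the single-predecessor precondition themselves before merging,
// e.g.
//
//   if (BasicBlock *Pred = DestBB->getSinglePredecessor())
//     if (auto *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
//       if (BI->isUnconditional())
//         MergeBasicBlockIntoOnlyPred(DestBB, &DTU);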
853
854/// Return true if we can choose one of these values to use in place of the
855/// other. Note that we will always choose the non-undef value to keep.
856static bool CanMergeValues(Value *First, Value *Second) {
857 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
858}
859
860/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
861/// branch to Succ, into Succ.
862///
863/// Assumption: Succ is the single successor for BB.
864static bool
865CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
866 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
867 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
868
869 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
870 << Succ->getName() << "\n");
871 // Shortcut, if there is only a single predecessor it must be BB and merging
872 // is always safe
873 if (Succ->getSinglePredecessor())
874 return true;
875
876 // Look at all the phi nodes in Succ, to see if they present a conflict when
877 // merging these blocks
878 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
879 PHINode *PN = cast<PHINode>(I);
880
881 // If the incoming value from BB is again a PHINode in
882 // BB which has the same incoming value for *PI as PN does, we can
883 // merge the phi nodes and then the blocks can still be merged
884 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
885 if (BBPN && BBPN->getParent() == BB) {
886 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
887 BasicBlock *IBB = PN->getIncomingBlock(PI);
888 if (BBPreds.count(IBB) &&
889 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
890 PN->getIncomingValue(PI))) {
892 << "Can't fold, phi node " << PN->getName() << " in "
893 << Succ->getName() << " is conflicting with "
894 << BBPN->getName() << " with regard to common predecessor "
895 << IBB->getName() << "\n");
896 return false;
897 }
898 }
899 } else {
900 Value* Val = PN->getIncomingValueForBlock(BB);
901 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
902 // See if the incoming value for the common predecessor is equal to the
903 // one for BB, in which case this phi node will not prevent the merging
904 // of the block.
905 BasicBlock *IBB = PN->getIncomingBlock(PI);
906 if (BBPreds.count(IBB) &&
907 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
908 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
909 << " in " << Succ->getName()
910 << " is conflicting with regard to common "
911 << "predecessor " << IBB->getName() << "\n");
912 return false;
913 }
914 }
915 }
916 }
917
918 return true;
919}
920
921using PredBlockVector = SmallVector<BasicBlock *, 16>;
922using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
923
924/// Determines the value to use as the phi node input for a block.
925///
926/// Select between \p OldVal any value that we know flows from \p BB
927/// to a particular phi on the basis of which one (if either) is not
928/// undef. Update IncomingValues based on the selected value.
929///
930/// \param OldVal The value we are considering selecting.
931/// \param BB The block that the value flows in from.
932/// \param IncomingValues A map from block-to-value for other phi inputs
933/// that we have examined.
934///
935/// \returns the selected value.
936static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
937 IncomingValueMap &IncomingValues) {
938 if (!isa<UndefValue>(OldVal)) {
939 assert((!IncomingValues.count(BB) ||
940 IncomingValues.find(BB)->second == OldVal) &&
941 "Expected OldVal to match incoming value from BB!");
942
943 IncomingValues.insert(std::make_pair(BB, OldVal));
944 return OldVal;
945 }
946
947 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
948 if (It != IncomingValues.end()) return It->second;
949
950 return OldVal;
951}
952
953/// Create a map from block to value for the operands of a
954/// given phi.
955///
956/// Create a map from block to value for each non-undef value flowing
957/// into \p PN.
958///
959/// \param PN The phi we are collecting the map for.
960/// \param IncomingValues [out] The map from block to value for this phi.
961static void gatherIncomingValuesToPhi(PHINode *PN,
962 IncomingValueMap &IncomingValues) {
963 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
964 BasicBlock *BB = PN->getIncomingBlock(i);
965 Value *V = PN->getIncomingValue(i);
966
967 if (!isa<UndefValue>(V))
968 IncomingValues.insert(std::make_pair(BB, V));
969 }
970}
971
972/// Replace the incoming undef values to a phi with the values
973/// from a block-to-value map.
974///
975/// \param PN The phi we are replacing the undefs in.
976/// \param IncomingValues A map from block to value.
977static void replaceUndefValuesInPhi(PHINode *PN,
978 const IncomingValueMap &IncomingValues) {
979 SmallVector<unsigned> TrueUndefOps;
980 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
981 Value *V = PN->getIncomingValue(i);
982
983 if (!isa<UndefValue>(V)) continue;
984
985 BasicBlock *BB = PN->getIncomingBlock(i);
986 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
987
988 // Keep track of undef/poison incoming values. Those must match, so we fix
989 // them up below if needed.
990 // Note: this is conservatively correct, but we could try harder and group
991 // the undef values per incoming basic block.
992 if (It == IncomingValues.end()) {
993 TrueUndefOps.push_back(i);
994 continue;
995 }
996
997 // There is a defined value for this incoming block, so map this undef
998 // incoming value to the defined value.
999 PN->setIncomingValue(i, It->second);
1000 }
1001
1002 // If there are both undef and poison values incoming, then convert those
1003 // values to undef. It is invalid to have different values for the same
1004 // incoming block.
1005 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
1006 return isa<PoisonValue>(PN->getIncomingValue(i));
1007 });
1008 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1009 for (unsigned i : TrueUndefOps)
1010 PN->setIncomingValue(i, UndefValue::get(PN->getType()));
1011 }
1012}
1013
1014// Only return true when BB and Succ share a single common predecessor.
1015// Only handles cases when BB can't be merged while its predecessors can be
1016// redirected.
1017static bool
1018CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
1019 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1020 const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
1021 BasicBlock *&CommonPred) {
1022
1023 // There must be phis in BB, otherwise BB will be merged into Succ directly
1024 if (BB->phis().empty() || Succ->phis().empty())
1025 return false;
1026
1027 // BB must have predecessors not shared that can be redirected to Succ
1028 if (!BB->hasNPredecessorsOrMore(2))
1029 return false;
1030
1031 // Get single common predecessors of both BB and Succ
1032 for (BasicBlock *SuccPred : SuccPreds) {
1033 if (BBPreds.count(SuccPred)) {
1034 if (CommonPred)
1035 return false;
1036 CommonPred = SuccPred;
1037 }
1038 }
1039
1040 return true;
1041}
1042
1043/// Replace a value flowing from a block to a phi with
1044/// potentially multiple instances of that value flowing from the
1045/// block's predecessors to the phi.
1046///
1047/// \param BB The block with the value flowing into the phi.
1048/// \param BBPreds The predecessors of BB.
1049/// \param PN The phi that we are updating.
1050/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1052 const PredBlockVector &BBPreds,
1053 PHINode *PN,
1054 BasicBlock *CommonPred) {
1055 Value *OldVal = PN->removeIncomingValue(BB, false);
1056 assert(OldVal && "No entry in PHI for Pred BB!");
1057
1058 IncomingValueMap IncomingValues;
1059
1060 // We are merging two blocks - BB, and the block containing PN - and
1061 // as a result we need to redirect edges from the predecessors of BB
1062 // to go to the block containing PN, and update PN
1063 // accordingly. Since we allow merging blocks in the case where the
1064 // predecessor and successor blocks both share some predecessors,
1065 // and where some of those common predecessors might have undef
1066 // values flowing into PN, we want to rewrite those values to be
1067 // consistent with the non-undef values.
1068
1069 gatherIncomingValuesToPhi(PN, IncomingValues);
1070
1071 // If this incoming value is one of the PHI nodes in BB, the new entries
1072 // in the PHI node are the entries from the old PHI.
1073 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1074 PHINode *OldValPN = cast<PHINode>(OldVal);
1075 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1076 // Note that, since we are merging phi nodes and BB and Succ might
1077 // have common predecessors, we could end up with a phi node with
1078 // identical incoming branches. This will be cleaned up later (and
1079 // will trigger asserts if we try to clean it up now, without also
1080 // simplifying the corresponding conditional branch).
1081 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1082
1083 if (PredBB == CommonPred)
1084 continue;
1085
1086 Value *PredVal = OldValPN->getIncomingValue(i);
1087 Value *Selected =
1088 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1089
1090 // And add a new incoming value for this predecessor for the
1091 // newly retargeted branch.
1092 PN->addIncoming(Selected, PredBB);
1093 }
1094 if (CommonPred)
1095 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1096
1097 } else {
1098 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1099 // Update existing incoming values in PN for this
1100 // predecessor of BB.
1101 BasicBlock *PredBB = BBPreds[i];
1102
1103 if (PredBB == CommonPred)
1104 continue;
1105
1106 Value *Selected =
1107 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1108
1109 // And add a new incoming value for this predecessor for the
1110 // newly retargeted branch.
1111 PN->addIncoming(Selected, PredBB);
1112 }
1113 if (CommonPred)
1114 PN->addIncoming(OldVal, BB);
1115 }
1116
1117 replaceUndefValuesInPhi(PN, IncomingValues);
1118}
1119
1120bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1121 DomTreeUpdater *DTU) {
1122 assert(BB != &BB->getParent()->getEntryBlock() &&
1123 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1124
1125 // We can't simplify infinite loops.
1126 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1127 if (BB == Succ)
1128 return false;
1129
1130 SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));
1131 SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(Succ), pred_end(Succ));
1132
1133 // The single common predecessor of BB and Succ when BB cannot be killed
1134 BasicBlock *CommonPred = nullptr;
1135
1136 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1137
1138 // Even if we cannot fold BB into Succ, we may be able to redirect the
1139 // predecessors of BB to Succ.
1140 bool BBPhisMergeable =
1141 BBKillable ||
1142 CanRedirectPredsOfEmptyBBToSucc(BB, Succ, BBPreds, SuccPreds, CommonPred);
1143
1144 if (!BBKillable && !BBPhisMergeable)
1145 return false;
1146
1147 // Check to see if merging these blocks/phis would cause conflicts for any of
1148 // the phi nodes in BB or Succ. If not, we can safely merge.
1149
1150 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1151 // has uses which will not disappear when the PHI nodes are merged. It is
1152 // possible to handle such cases, but difficult: it requires checking whether
1153 // BB dominates Succ, which is non-trivial to calculate in the case where
1154 // Succ has multiple predecessors. Also, it requires checking whether
1155 // constructing the necessary self-referential PHI node doesn't introduce any
1156 // conflicts; this isn't too difficult, but the previous code for doing this
1157 // was incorrect.
1158 //
1159 // Note that if this check finds a live use, BB dominates Succ, so BB is
1160 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1161 // folding the branch isn't profitable in that case anyway.
1162 if (!Succ->getSinglePredecessor()) {
1163 BasicBlock::iterator BBI = BB->begin();
1164 while (isa<PHINode>(*BBI)) {
1165 for (Use &U : BBI->uses()) {
1166 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1167 if (PN->getIncomingBlock(U) != BB)
1168 return false;
1169 } else {
1170 return false;
1171 }
1172 }
1173 ++BBI;
1174 }
1175 }
1176
1177 if (BBPhisMergeable && CommonPred)
1178 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1179 << " and " << Succ->getName() << " : "
1180 << CommonPred->getName() << "\n");
1181
1182 // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
1183 // metadata.
1184 //
1185 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1186 // current status (that loop metadata is implemented as metadata attached to
1187 // the branch instruction in the loop latch block). To quote from review
1188 // comments, "the current representation of loop metadata (using a loop latch
1189 // terminator attachment) is known to be fundamentally broken. Loop latches
1190 // are not uniquely associated with loops (both in that a latch can be part of
1191 // multiple loops and a loop may have multiple latches). Loop headers are. The
1192 // solution to this problem is also known: Add support for basic block
1193 // metadata, and attach loop metadata to the loop header."
1194 //
1195 // Why bail out:
1196 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1197 // the latch for inner-loop (see reason below), so bail out to preserve
1198 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1199 // to this inner-loop.
1200 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1201 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1202 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1203 // one self-looping basic block, which is contradictory with the assumption.
1204 //
1205 // To illustrate how inner-loop metadata is dropped:
1206 //
1207 // CFG Before
1208 //
1209 // BB is while.cond.exit, attached with loop metadata md2.
1210 // BB->Pred is for.body, attached with loop metadata md1.
1211 //
1212 // entry
1213 // |
1214 // v
1215 // ---> while.cond -------------> while.end
1216 // | |
1217 // | v
1218 // | while.body
1219 // | |
1220 // | v
1221 // | for.body <---- (md1)
1222 // | | |______|
1223 // | v
1224 // | while.cond.exit (md2)
1225 // | |
1226 // |_______|
1227 //
1228 // CFG After
1229 //
1230 // while.cond1 is the merge of while.cond.exit and while.cond above.
1231 // for.body is attached with md2, and md1 is dropped.
1232 // If LoopSimplify runs later (as a part of loop pass), it could create
1233 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1234 // back), but it won't see 'md1' nor restore it for the inner-loop.
1235 //
1236 // entry
1237 // |
1238 // v
1239 // ---> while.cond1 -------------> while.end
1240 // | |
1241 // | v
1242 // | while.body
1243 // | |
1244 // | v
1245 // | for.body <---- (md2)
1246 // |_______| |______|
1247 if (Instruction *TI = BB->getTerminator())
1248 if (TI->hasMetadata(LLVMContext::MD_loop))
1249 for (BasicBlock *Pred : predecessors(BB))
1250 if (Instruction *PredTI = Pred->getTerminator())
1251 if (PredTI->hasMetadata(LLVMContext::MD_loop))
1252 return false;
1253
1254 if (BBKillable)
1255 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1256 else if (BBPhisMergeable)
1257 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1258
1259 SmallVector<DominatorTree::UpdateType, 32> Updates;
1260
1261 if (DTU) {
1262 // To avoid processing the same predecessor more than once.
1263 SmallPtrSet<BasicBlock *, 8> SeenPreds;
1264 // All predecessors of BB (except the common predecessor) will be moved to
1265 // Succ.
1266 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1267
1268 for (auto *PredOfBB : predecessors(BB)) {
1269 // Do not modify those common predecessors of BB and Succ
1270 if (!SuccPreds.contains(PredOfBB))
1271 if (SeenPreds.insert(PredOfBB).second)
1272 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1273 }
1274
1275 SeenPreds.clear();
1276
1277 for (auto *PredOfBB : predecessors(BB))
1278 // When BB cannot be killed, do not remove the edge between BB and
1279 // CommonPred.
1280 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1281 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1282
1283 if (BBKillable)
1284 Updates.push_back({DominatorTree::Delete, BB, Succ});
1285 }
1286
1287 if (isa<PHINode>(Succ->begin())) {
1288 // If there is more than one pred of succ, and there are PHI nodes in
1289 // the successor, then we need to add incoming edges for the PHI nodes
1290 //
1291 const PredBlockVector BBPreds(predecessors(BB));
1292
1293 // Loop over all of the PHI nodes in the successor of BB.
1294 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1295 PHINode *PN = cast<PHINode>(I);
1296 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1297 }
1298 }
1299
1300 if (Succ->getSinglePredecessor()) {
1301 // BB is the only predecessor of Succ, so Succ will end up with exactly
1302 // the same predecessors BB had.
1303 // Copy over any phi, debug or lifetime instruction.
1304 BB->getTerminator()->eraseFromParent();
1305 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1306 } else {
1307 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1308 // We explicitly check for such uses for merging phis.
1309 assert(PN->use_empty() && "There shouldn't be any uses here!");
1310 PN->eraseFromParent();
1311 }
1312 }
1313
1314 // If the unconditional branch we replaced contains llvm.loop metadata, we
1315 // add the metadata to the branch instructions in the predecessors.
1316 if (Instruction *TI = BB->getTerminator())
1317 if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
1318 for (BasicBlock *Pred : predecessors(BB))
1319 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1320
1321 if (BBKillable) {
1322 // Everything that jumped to BB now goes to Succ.
1323 BB->replaceAllUsesWith(Succ);
1324
1325 if (!Succ->hasName())
1326 Succ->takeName(BB);
1327
1328 // Clear the successor list of BB to match updates applying to DTU later.
1329 if (BB->getTerminator())
1330 BB->back().eraseFromParent();
1331
1332 new UnreachableInst(BB->getContext(), BB);
1333 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1334 "applying corresponding DTU updates.");
1335 } else if (BBPhisMergeable) {
1336 // Everything except CommonPred that jumped to BB now goes to Succ.
1337 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1338 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1339 return UseInst->getParent() != CommonPred &&
1340 BBPreds.contains(UseInst->getParent());
1341 return false;
1342 });
1343 }
1344
1345 if (DTU)
1346 DTU->applyUpdates(Updates);
1347
1348 if (BBKillable)
1349 DeleteDeadBlock(BB, DTU);
1350
1351 return true;
1352}
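// Illustrative sketch (not part of the upstream file): SimplifyCFG-style
// callers invoke this on blocks that contain nothing but PHIs and an
// unconditional branch, e.g.
//
//   if (auto *BI = dyn_cast<BranchInst>(BB->getTerminator()))
//     if (BI->isUnconditional() && BB->getFirstNonPHIOrDbg() == BI)
//       Changed |= TryToSimplifyUncondBranchFromEmptyBlock(BB, &DTU);
//
// The function itself bails out on the entry block, self-loops, conflicting
// PHIs, and the loop-metadata corner case documented above.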
1353
1354static bool
1355EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1356 SmallPtrSetImpl<PHINode *> &ToRemove) {
1357 // This implementation doesn't currently consider undef operands
1358 // specially. Theoretically, two phis which are identical except for
1359 // one having an undef where the other doesn't could be collapsed.
1360
1361 bool Changed = false;
1362
1363 // Examine each PHI.
1364 // Note that increment of I must *NOT* be in the iteration_expression, since
1365 // we don't want to immediately advance when we restart from the beginning.
1366 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1367 ++I;
1368 // Is there an identical PHI node in this basic block?
1369 // Note that we only look in the upper square's triangle,
1370 // we already checked that the lower triangle PHI's aren't identical.
1371 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1372 if (ToRemove.contains(DuplicatePN))
1373 continue;
1374 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1375 continue;
1376 // A duplicate. Replace this PHI with the base PHI.
1377 ++NumPHICSEs;
1378 DuplicatePN->replaceAllUsesWith(PN);
1379 ToRemove.insert(DuplicatePN);
1380 Changed = true;
1381
1382 // The RAUW can change PHIs that we already visited.
1383 I = BB->begin();
1384 break; // Start over from the beginning.
1385 }
1386 }
1387 return Changed;
1388}
1389
1390static bool
1391EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1392 SmallPtrSetImpl<PHINode *> &ToRemove) {
1393 // This implementation doesn't currently consider undef operands
1394 // specially. Theoretically, two phis which are identical except for
1395 // one having an undef where the other doesn't could be collapsed.
1396
1397 struct PHIDenseMapInfo {
1398 static PHINode *getEmptyKey() {
1399 return DenseMapInfo<PHINode *>::getEmptyKey();
1400 }
1401
1402 static PHINode *getTombstoneKey() {
1403 return DenseMapInfo<PHINode *>::getTombstoneKey();
1404 }
1405
1406 static bool isSentinel(PHINode *PN) {
1407 return PN == getEmptyKey() || PN == getTombstoneKey();
1408 }
1409
1410 // WARNING: this logic must be kept in sync with
1411 // Instruction::isIdenticalToWhenDefined()!
1412 static unsigned getHashValueImpl(PHINode *PN) {
1413 // Compute a hash value on the operands. Instcombine will likely have
1414 // sorted them, which helps expose duplicates, but we have to check all
1415 // the operands to be safe in case instcombine hasn't run.
1416 return static_cast<unsigned>(hash_combine(
1417 hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1418 hash_combine_range(PN->block_begin(), PN->block_end())));
1419 }
1420
1421 static unsigned getHashValue(PHINode *PN) {
1422#ifndef NDEBUG
1423 // If -phicse-debug-hash was specified, return a constant -- this
1424 // will force all hashing to collide, so we'll exhaustively search
1425 // the table for a match, and the assertion in isEqual will fire if
1426 // there's a bug causing equal keys to hash differently.
1427 if (PHICSEDebugHash)
1428 return 0;
1429#endif
1430 return getHashValueImpl(PN);
1431 }
1432
1433 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1434 if (isSentinel(LHS) || isSentinel(RHS))
1435 return LHS == RHS;
1436 return LHS->isIdenticalTo(RHS);
1437 }
1438
1439 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1440 // These comparisons are nontrivial, so assert that equality implies
1441 // hash equality (DenseMap demands this as an invariant).
1442 bool Result = isEqualImpl(LHS, RHS);
1443 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1444 getHashValueImpl(LHS) == getHashValueImpl(RHS));
1445 return Result;
1446 }
1447 };
1448
1449 // Set of unique PHINodes.
1450 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1451 PHISet.reserve(4 * PHICSENumPHISmallSize);
1452
1453 // Examine each PHI.
1454 bool Changed = false;
1455 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1456 if (ToRemove.contains(PN))
1457 continue;
1458 auto Inserted = PHISet.insert(PN);
1459 if (!Inserted.second) {
1460 // A duplicate. Replace this PHI with its duplicate.
1461 ++NumPHICSEs;
1462 PN->replaceAllUsesWith(*Inserted.first);
1463 ToRemove.insert(PN);
1464 Changed = true;
1465
1466 // The RAUW can change PHIs that we already visited. Start over from the
1467 // beginning.
1468 PHISet.clear();
1469 I = BB->begin();
1470 }
1471 }
1472
1473 return Changed;
1474}
1475
1476bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1477 SmallPtrSetImpl<PHINode *> &ToRemove) {
1478 if (
1479#ifndef NDEBUG
1480 !PHICSEDebugHash &&
1481#endif
1482 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1483 return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1484 return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1485}
1486
1487bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1488 SmallPtrSet<PHINode *, 8> ToRemove;
1489 bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1490 for (PHINode *PN : ToRemove)
1491 PN->eraseFromParent();
1492 return Changed;
1493}
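// Illustrative sketch (not part of the upstream file): the two-argument form
// lets a caller batch the erasures itself, which is useful when other data
// structures still reference the PHIs, e.g.
//
//   SmallPtrSet<PHINode *, 8> ToRemove;
//   bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
//   for (PHINode *PN : ToRemove)
//     PN->eraseFromParent();
//
// which is exactly what the convenience overload above does.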
1494
1495static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1496 const DataLayout &DL) {
1497 V = V->stripPointerCasts();
1498
1499 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1500 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1501 // than the current alignment, as the known bits calculation should have
1502 // already taken it into account. However, this is not always the case,
1503 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1504 // doesn't.
1505 Align CurrentAlign = AI->getAlign();
1506 if (PrefAlign <= CurrentAlign)
1507 return CurrentAlign;
1508
1509 // If the preferred alignment is greater than the natural stack alignment
1510 // then don't round up. This avoids dynamic stack realignment.
1511 if (DL.exceedsNaturalStackAlignment(PrefAlign))
1512 return CurrentAlign;
1513 AI->setAlignment(PrefAlign);
1514 return PrefAlign;
1515 }
1516
1517 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1518 // TODO: as above, this shouldn't be necessary.
1519 Align CurrentAlign = GO->getPointerAlignment(DL);
1520 if (PrefAlign <= CurrentAlign)
1521 return CurrentAlign;
1522
1523 // If there is a large requested alignment and we can, bump up the alignment
1524 // of the global. If the memory we set aside for the global may not be the
1525 // memory used by the final program then it is impossible for us to reliably
1526 // enforce the preferred alignment.
1527 if (!GO->canIncreaseAlignment())
1528 return CurrentAlign;
1529
1530 if (GO->isThreadLocal()) {
1531 unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1532 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1533 PrefAlign = Align(MaxTLSAlign);
1534 }
1535
1536 GO->setAlignment(PrefAlign);
1537 return PrefAlign;
1538 }
1539
1540 return Align(1);
1541}
1542
1543Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1544 const DataLayout &DL,
1545 const Instruction *CxtI,
1546 AssumptionCache *AC,
1547 const DominatorTree *DT) {
1548 assert(V->getType()->isPointerTy() &&
1549 "getOrEnforceKnownAlignment expects a pointer!");
1550
1551 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1552 unsigned TrailZ = Known.countMinTrailingZeros();
1553
1554 // Avoid trouble with ridiculously large TrailZ values, such as
1555 // those computed from a null pointer.
1556 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1557 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1558
1559 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1560
1561 if (PrefAlign && *PrefAlign > Alignment)
1562 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1563
1564 // We don't need to make any adjustment.
1565 return Alignment;
1566}
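// Illustrative sketch (not part of the upstream file): memory-intrinsic
// lowering uses this to both query and opportunistically raise alignment,
// e.g.
//
//   Align KnownAlign = getOrEnforceKnownAlignment(
//       MI->getDest(), /*PrefAlign=*/Align(16), DL, MI, &AC, &DT);
//
// The returned alignment is at least what computeKnownBits can prove; the
// preferred alignment is only applied when the underlying alloca or global
// can legally be over-aligned.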
1567
1568///===---------------------------------------------------------------------===//
1569/// Dbg Intrinsic utilities
1570///
1571
1572/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1573static bool PhiHasDebugValue(DILocalVariable *DIVar,
1574 DIExpression *DIExpr,
1575 PHINode *APN) {
1576 // Since we can't guarantee that the original dbg.declare intrinsic
1577 // is removed by LowerDbgDeclare(), we need to make sure that we are
1578 // not inserting the same dbg.value intrinsic over and over.
1579 SmallVector<DbgValueInst *, 1> DbgValues;
1580 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1581 findDbgValues(DbgValues, APN, &DbgVariableRecords);
1582 for (auto *DVI : DbgValues) {
1583 assert(is_contained(DVI->getValues(), APN));
1584 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1585 return true;
1586 }
1587 for (auto *DVR : DbgVariableRecords) {
1588 assert(is_contained(DVR->location_ops(), APN));
1589 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1590 return true;
1591 }
1592 return false;
1593}
1594
1595/// Check if the alloc size of \p ValTy is large enough to cover the variable
1596/// (or fragment of the variable) described by \p DII.
1597///
1598/// This is primarily intended as a helper for the different
1599/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1600/// describes an alloca'd variable, so we need to use the alloc size of the
1601/// value when doing the comparison. E.g. an i1 value will be identified as
1602/// covering an n-bit fragment, if the store size of i1 is at least n bits.
1603static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1604 const DataLayout &DL = DII->getModule()->getDataLayout();
1605 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1606 if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
1607 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1608
1609 // We can't always calculate the size of the DI variable (e.g. if it is a
1610 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1611 // instead.
1612 if (DII->isAddressOfVariable()) {
1613 // DII should have exactly 1 location when it is an address.
1614 assert(DII->getNumVariableLocationOps() == 1 &&
1615 "address of variable must have exactly 1 location operand.");
1616 if (auto *AI =
1617 dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1618 if (std::optional<TypeSize> FragmentSize =
1619 AI->getAllocationSizeInBits(DL)) {
1620 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1621 }
1622 }
1623 }
1624 // Could not determine size of variable. Conservatively return false.
1625 return false;
1626}
1627// RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
1628// the replacement for dbg.values.
1629static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
1630 const DataLayout &DL = DVR->getModule()->getDataLayout();
1631 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1632 if (std::optional<uint64_t> FragmentSize = DVR->getFragmentSizeInBits())
1633 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1634
1635 // We can't always calculate the size of the DI variable (e.g. if it is a
1636 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1637 // instead.
1638 if (DVR->isAddressOfVariable()) {
1639 // DVR should have exactly 1 location when it is an address.
1640 assert(DVR->getNumVariableLocationOps() == 1 &&
1641 "address of variable must have exactly 1 location operand.");
1642 if (auto *AI =
1643 dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) {
1644 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1645 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1646 }
1647 }
1648 }
1649 // Could not determine size of variable. Conservatively return false.
1650 return false;
1651}
1652
1653static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
1654 DILocalVariable *DIVar,
1655 DIExpression *DIExpr,
1656 const DebugLoc &NewLoc,
1657 BasicBlock::iterator Instr) {
1658 if (!UseNewDbgInfoFormat) {
1659 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1660 (Instruction *)nullptr);
1661 DbgVal.get<Instruction *>()->insertBefore(Instr);
1662 } else {
1663 // RemoveDIs: if we're using the new debug-info format, allocate a
1664 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1665 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1666 DbgVariableRecord *DV =
1667 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1668 Instr->getParent()->insertDbgRecordBefore(DV, Instr);
1669 }
1670}
1671
1672static void insertDbgValueOrDbgVariableRecordAfter(
1673 DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
1674 const DebugLoc &NewLoc, BasicBlock::iterator Instr) {
1675 if (!UseNewDbgInfoFormat) {
1676 auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
1677 (Instruction *)nullptr);
1678 DbgVal.get<Instruction *>()->insertAfter(&*Instr);
1679 } else {
1680 // RemoveDIs: if we're using the new debug-info format, allocate a
1681 // DbgVariableRecord directly instead of a dbg.value intrinsic.
1682 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1683 DbgVariableRecord *DV =
1684 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1685 Instr->getParent()->insertDbgRecordAfter(DV, &*Instr);
1686 }
1687}
1688
1689/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
1690/// that has an associated llvm.dbg.declare intrinsic.
1691void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1692 StoreInst *SI, DIBuilder &Builder) {
1693 assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1694 auto *DIVar = DII->getVariable();
1695 assert(DIVar && "Missing variable");
1696 auto *DIExpr = DII->getExpression();
1697 Value *DV = SI->getValueOperand();
1698
1699 DebugLoc NewLoc = getDebugValueLoc(DII);
1700
1701 // If the alloca describes the variable itself, i.e. the expression in the
1702 // dbg.declare doesn't start with a dereference, we can perform the
1703 // conversion if the value covers the entire fragment of DII.
1704 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1705 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1706 // We conservatively ignore other dereferences, because the following two are
1707 // not equivalent:
1708 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1709 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1710 // The former is adding 2 to the address of the variable, whereas the latter
1711 // is adding 2 to the value of the variable. As such, we insist on just a
1712 // deref expression.
1713 bool CanConvert =
1714 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1715 valueCoversEntireFragment(DV->getType(), DII));
1716 if (CanConvert) {
1717 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1718 SI->getIterator());
1719 return;
1720 }
1721
1722 // FIXME: If storing to a part of the variable described by the dbg.declare,
1723 // then we want to insert a dbg.value for the corresponding fragment.
1724 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1725 << '\n');
1726 // For now, when there is a store to parts of the variable (but we do not
1727 // know which part) we insert a dbg.value intrinsic to indicate that we
1728 // know nothing about the variable's content.
1729 DV = UndefValue::get(DV->getType());
1730 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1731 SI->getIterator());
1732}
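// For example (minimal sketch): given
//   store i64 %v, ptr %x     ; %x is the declared alloca of a 64-bit variable
// the dbg.declare describing %x is turned into a dbg.value of %v at the
// store, so the variable stays described even after the alloca is elided.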
1733
1734/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
1735/// that has an associated llvm.dbg.declare intrinsic.
1736void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1737 LoadInst *LI, DIBuilder &Builder) {
1738 auto *DIVar = DII->getVariable();
1739 auto *DIExpr = DII->getExpression();
1740 assert(DIVar && "Missing variable");
1741
1742 if (!valueCoversEntireFragment(LI->getType(), DII)) {
1743 // FIXME: If only referring to a part of the variable described by the
1744 // dbg.declare, then we want to insert a dbg.value for the corresponding
1745 // fragment.
1746 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1747 << *DII << '\n');
1748 return;
1749 }
1750
1751 DebugLoc NewLoc = getDebugValueLoc(DII);
1752
1753 // We are now tracking the loaded value instead of the address. In the
1754 // future if multi-location support is added to the IR, it might be
1755 // preferable to keep tracking both the loaded value and the original
1756 // address in case the alloca can not be elided.
1757 insertDbgValueOrDbgVariableRecordAfter(Builder, LI, DIVar, DIExpr, NewLoc,
1758 LI->getIterator());
1759}
1760
1761void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
1762 StoreInst *SI, DIBuilder &Builder) {
1763 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1764 auto *DIVar = DVR->getVariable();
1765 assert(DIVar && "Missing variable");
1766 auto *DIExpr = DVR->getExpression();
1767 Value *DV = SI->getValueOperand();
1768
1769 DebugLoc NewLoc = getDebugValueLoc(DVR);
1770
1771 // If the alloca describes the variable itself, i.e. the expression in the
1772 // dbg.declare doesn't start with a dereference, we can perform the
1773 // conversion if the value covers the entire fragment of DVR.
1774 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1775 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1776 // We conservatively ignore other dereferences, because the following two are
1777 // not equivalent:
1778 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1779 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1780 // The former is adding 2 to the address of the variable, whereas the latter
1781 // is adding 2 to the value of the variable. As such, we insist on just a
1782 // deref expression.
1783 bool CanConvert =
1784 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1785 valueCoversEntireFragment(DV->getType(), DVR));
1786 if (CanConvert) {
1787 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1788 SI->getIterator());
1789 return;
1790 }
1791
1792 // FIXME: If storing to a part of the variable described by the dbg.declare,
1793 // then we want to insert a dbg.value for the corresponding fragment.
1794 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1795 << '\n');
1796 assert(UseNewDbgInfoFormat);
1797
1798 // For now, when there is a store to parts of the variable (but we do not
1799 // know which part) we insert a dbg.value intrinsic to indicate that we
1800 // know nothing about the variable's content.
1801 DV = UndefValue::get(DV->getType());
1802 ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
1803 DbgVariableRecord *NewDVR =
1804 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1805 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1806}
1807
1808/// Inserts an llvm.dbg.value intrinsic after a phi that has an associated
1809/// llvm.dbg.declare intrinsic.
1810void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1811 PHINode *APN, DIBuilder &Builder) {
1812 auto *DIVar = DII->getVariable();
1813 auto *DIExpr = DII->getExpression();
1814 assert(DIVar && "Missing variable");
1815
1816 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1817 return;
1818
1819 if (!valueCoversEntireFragment(APN->getType(), DII)) {
1820 // FIXME: If only referring to a part of the variable described by the
1821 // dbg.declare, then we want to insert a dbg.value for the corresponding
1822 // fragment.
1823 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1824 << *DII << '\n');
1825 return;
1826 }
1827
1828 BasicBlock *BB = APN->getParent();
1829 auto InsertionPt = BB->getFirstInsertionPt();
1830
1831 DebugLoc NewLoc = getDebugValueLoc(DII);
1832
1833 // The block may be a catchswitch block, which does not have a valid
1834 // insertion point.
1835 // FIXME: Insert dbg.value markers in the successors when appropriate.
1836 if (InsertionPt != BB->end()) {
1837 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1838 InsertionPt);
1839 }
1840}
1841
1842void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
1843 DIBuilder &Builder) {
1844 auto *DIVar = DVR->getVariable();
1845 auto *DIExpr = DVR->getExpression();
1846 assert(DIVar && "Missing variable");
1847
1848 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1849 // FIXME: If only referring to a part of the variable described by the
1850 // dbg.declare, then we want to insert a DbgVariableRecord for the
1851 // corresponding fragment.
1852 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1853 << *DVR << '\n');
1854 return;
1855 }
1856
1857 DebugLoc NewLoc = getDebugValueLoc(DVR);
1858
1859 // We are now tracking the loaded value instead of the address. In the
1860 // future if multi-location support is added to the IR, it might be
1861 // preferable to keep tracking both the loaded value and the original
1862 // address in case the alloca can not be elided.
1863 assert(UseNewDbgInfoFormat);
1864
1865 // Create a DbgVariableRecord directly and insert.
1866 ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
1867 DbgVariableRecord *DV =
1868 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1869 LI->getParent()->insertDbgRecordAfter(DV, LI);
1870}
1871
1872/// Determine whether this alloca is either a VLA or an array.
1873static bool isArray(AllocaInst *AI) {
1874 return AI->isArrayAllocation() ||
1875 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1876}
1877
1878/// Determine whether this alloca is a structure.
1879static bool isStructure(AllocaInst *AI) {
1880 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1881}
1882void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
1883 DIBuilder &Builder) {
1884 auto *DIVar = DVR->getVariable();
1885 auto *DIExpr = DVR->getExpression();
1886 assert(DIVar && "Missing variable");
1887
1888 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1889 return;
1890
1891 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1892 // FIXME: If only referring to a part of the variable described by the
1893 // dbg.declare, then we want to insert a DbgVariableRecord for the
1894 // corresponding fragment.
1895 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1896 << *DVR << '\n');
1897 return;
1898 }
1899
1900 BasicBlock *BB = APN->getParent();
1901 auto InsertionPt = BB->getFirstInsertionPt();
1902
1903 DebugLoc NewLoc = getDebugValueLoc(DVR);
1904
1905 // The block may be a catchswitch block, which does not have a valid
1906 // insertion point.
1907 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1908 if (InsertionPt != BB->end()) {
1909 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1910 InsertionPt);
1911 }
1912}
1913
1914/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1915/// of llvm.dbg.value intrinsics.
1916bool llvm::LowerDbgDeclare(Function &F) {
1917 bool Changed = false;
1918 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1919 SmallVector<DbgDeclareInst *, 4> Dbgs;
1920 SmallVector<DbgVariableRecord *> DVRs;
1921 for (auto &FI : F) {
1922 for (Instruction &BI : FI) {
1923 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1924 Dbgs.push_back(DDI);
1925 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1926 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1927 DVRs.push_back(&DVR);
1928 }
1929 }
1930 }
1931
1932 if (Dbgs.empty() && DVRs.empty())
1933 return Changed;
1934
1935 auto LowerOne = [&](auto *DDI) {
1936 AllocaInst *AI =
1937 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1938 // If this is an alloca for a scalar variable, insert a dbg.value
1939 // at each load and store to the alloca and erase the dbg.declare.
1940 // The dbg.values allow tracking a variable even if it is not
1941 // stored on the stack, while the dbg.declare can only describe
1942 // the stack slot (and at a lexical-scope granularity). Later
1943 // passes will attempt to elide the stack slot.
1944 if (!AI || isArray(AI) || isStructure(AI))
1945 return;
1946
1947 // A volatile load/store means that the alloca can't be elided anyway.
1948 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1949 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1950 return LI->isVolatile();
1951 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1952 return SI->isVolatile();
1953 return false;
1954 }))
1955 return;
1956
1957 SmallVector<const Value *, 8> WorkList;
1958 WorkList.push_back(AI);
1959 while (!WorkList.empty()) {
1960 const Value *V = WorkList.pop_back_val();
1961 for (const auto &AIUse : V->uses()) {
1962 User *U = AIUse.getUser();
1963 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1964 if (AIUse.getOperandNo() == 1)
1965 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1966 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1967 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1968 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1969 // This is a call by-value or some other instruction that takes a
1970 // pointer to the variable. Insert a *value* intrinsic that describes
1971 // the variable by dereferencing the alloca.
1972 if (!CI->isLifetimeStartOrEnd()) {
1973 DebugLoc NewLoc = getDebugValueLoc(DDI);
1974 auto *DerefExpr =
1975 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1976 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1977 DerefExpr, NewLoc,
1978 CI->getIterator());
1979 }
1980 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1981 if (BI->getType()->isPointerTy())
1982 WorkList.push_back(BI);
1983 }
1984 }
1985 }
1986 DDI->eraseFromParent();
1987 Changed = true;
1988 };
1989
1990 for_each(Dbgs, LowerOne);
1991 for_each(DVRs, LowerOne);
1992
1993 if (Changed)
1994 for (BasicBlock &BB : F)
1995 RemoveRedundantDbgInstrs(&BB);
1996
1997 return Changed;
1998}
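// For example (minimal sketch of LowerDbgDeclare on a scalar alloca):
//   %p = alloca i32
//   llvm.dbg.declare(ptr %p, !var, !DIExpression())
//   store i32 %a, ptr %p
//   %b = load i32, ptr %p
// becomes a dbg.value(i32 %a, ...) at the store and a dbg.value(i32 %b, ...)
// after the load, and the dbg.declare is erased.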
1999
2000// RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
2001// debug-info out of the block's DbgVariableRecords rather than dbg.value
2002// intrinsics.
2003static void
2004insertDbgVariableRecordsForPHIs(BasicBlock *BB,
2005 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2006 assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
2007 if (InsertedPHIs.size() == 0)
2008 return;
2009
2010 // Map existing PHI nodes to their DbgVariableRecords.
2011 DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
2012 for (auto &I : *BB) {
2013 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
2014 for (Value *V : DVR.location_ops())
2015 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2016 DbgValueMap.insert({Loc, &DVR});
2017 }
2018 }
2019 if (DbgValueMap.size() == 0)
2020 return;
2021
2022 // Map a pair of the destination BB and old DbgVariableRecord to the new
2023 // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
2024 // more than one of the inserted PHIs in the same destination BB, we can
2025 // update the same DbgVariableRecord with all the new PHIs instead of creating
2026 // one copy for each.
2028 NewDbgValueMap;
2029 // Then iterate through the new PHIs and look to see if they use one of the
2030 // previously mapped PHIs. If so, create a new DbgVariableRecord that will
2031 // propagate the info through the new PHI. If we use more than one new PHI in
2032 // a single destination BB with the same old dbg.value, merge the updates so
2033 // that we get a single new DbgVariableRecord with all the new PHIs.
2034 for (auto PHI : InsertedPHIs) {
2035 BasicBlock *Parent = PHI->getParent();
2036 // Avoid inserting a debug-info record into an EH block.
2037 if (Parent->getFirstNonPHI()->isEHPad())
2038 continue;
2039 for (auto VI : PHI->operand_values()) {
2040 auto V = DbgValueMap.find(VI);
2041 if (V != DbgValueMap.end()) {
2042 DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
2043 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2044 if (NewDI == NewDbgValueMap.end()) {
2045 DbgVariableRecord *NewDbgII = DbgII->clone();
2046 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2047 }
2048 DbgVariableRecord *NewDbgII = NewDI->second;
2049 // If PHI contains VI as an operand more than once, we may have
2050 // replaced it in NewDbgII; confirm that it is present.
2051 if (is_contained(NewDbgII->location_ops(), VI))
2052 NewDbgII->replaceVariableLocationOp(VI, PHI);
2053 }
2054 }
2055 }
2056 // Insert the new DbgVariableRecords into their destination blocks.
2057 for (auto DI : NewDbgValueMap) {
2058 BasicBlock *Parent = DI.first.first;
2059 DbgVariableRecord *NewDbgII = DI.second;
2060 auto InsertionPt = Parent->getFirstInsertionPt();
2061 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2062
2063 Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
2064 }
2065}
2066
2067/// Propagate dbg.value intrinsics through the newly inserted PHIs.
2068void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
2069 SmallVectorImpl<PHINode *> &InsertedPHIs) {
2070 assert(BB && "No BasicBlock to clone dbg.value(s) from.");
2071 if (InsertedPHIs.size() == 0)
2072 return;
2073
2074 insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);
2075
2076 // Map existing PHI nodes to their dbg.values.
2077 ValueToValueMapTy DbgValueMap;
2078 for (auto &I : *BB) {
2079 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
2080 for (Value *V : DbgII->location_ops())
2081 if (auto *Loc = dyn_cast_or_null<PHINode>(V))
2082 DbgValueMap.insert({Loc, DbgII});
2083 }
2084 }
2085 if (DbgValueMap.size() == 0)
2086 return;
2087
2088 // Map a pair of the destination BB and old dbg.value to the new dbg.value,
2089 // so that if a dbg.value is being rewritten to use more than one of the
2090 // inserted PHIs in the same destination BB, we can update the same dbg.value
2091 // with all the new PHIs instead of creating one copy for each.
2094 NewDbgValueMap;
2095 // Then iterate through the new PHIs and look to see if they use one of the
2096 // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
2097 // propagate the info through the new PHI. If we use more than one new PHI in
2098 // a single destination BB with the same old dbg.value, merge the updates so
2099 // that we get a single new dbg.value with all the new PHIs.
2100 for (auto *PHI : InsertedPHIs) {
2101 BasicBlock *Parent = PHI->getParent();
2102 // Avoid inserting an intrinsic into an EH block.
2103 if (Parent->getFirstNonPHI()->isEHPad())
2104 continue;
2105 for (auto *VI : PHI->operand_values()) {
2106 auto V = DbgValueMap.find(VI);
2107 if (V != DbgValueMap.end()) {
2108 auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
2109 auto NewDI = NewDbgValueMap.find({Parent, DbgII});
2110 if (NewDI == NewDbgValueMap.end()) {
2111 auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
2112 NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
2113 }
2114 DbgVariableIntrinsic *NewDbgII = NewDI->second;
2115 // If PHI contains VI as an operand more than once, we may have
2116 // replaced it in NewDbgII; confirm that it is present.
2117 if (is_contained(NewDbgII->location_ops(), VI))
2118 NewDbgII->replaceVariableLocationOp(VI, PHI);
2119 }
2120 }
2121 }
2122 // Insert the new dbg.values into their destination blocks.
2123 for (auto DI : NewDbgValueMap) {
2124 BasicBlock *Parent = DI.first.first;
2125 auto *NewDbgII = DI.second;
2126 auto InsertionPt = Parent->getFirstInsertionPt();
2127 assert(InsertionPt != Parent->end() && "Ill-formed basic block");
2128 NewDbgII->insertBefore(&*InsertionPt);
2129 }
2130}
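// For example (minimal sketch): if a dbg.value describes !var via %old.phi
// and SSA updating inserts %new.phi = phi i32 [ %old.phi, ... ] in a
// successor block, a clone of that dbg.value is placed at the successor's
// first insertion point with %old.phi replaced by %new.phi.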
2131
2132bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
2133 DIBuilder &Builder, uint8_t DIExprFlags,
2134 int Offset) {
2135 TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address);
2136 TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(Address);
2137
2138 auto ReplaceOne = [&](auto *DII) {
2139 assert(DII->getVariable() && "Missing variable");
2140 auto *DIExpr = DII->getExpression();
2141 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
2142 DII->setExpression(DIExpr);
2143 DII->replaceVariableLocationOp(Address, NewAddress);
2144 };
2145
2146 for_each(DbgDeclares, ReplaceOne);
2147 for_each(DVRDeclares, ReplaceOne);
2148
2149 return !DbgDeclares.empty() || !DVRDeclares.empty();
2150}
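// For example (illustrative call, operand names assumed): redirecting the
// declares from an alloca to a frame address that is 8 bytes off:
//   replaceDbgDeclare(OldAlloca, FramePtr, DIB, DIExpression::ApplyOffset, 8);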
2151
2152static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
2153 DILocalVariable *DIVar,
2154 DIExpression *DIExpr, Value *NewAddress,
2155 DbgValueInst *DVI,
2156 DbgVariableRecord *DVR,
2157 DIBuilder &Builder, int Offset) {
2158 assert(DIVar && "Missing variable");
2159
2160 // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
2161 // should do with the alloca pointer is dereference it. Otherwise we don't
2162 // know how to handle it and give up.
2163 if (!DIExpr || DIExpr->getNumElements() < 1 ||
2164 DIExpr->getElement(0) != dwarf::DW_OP_deref)
2165 return;
2166
2167 // Insert the offset before the first deref.
2168 if (Offset)
2169 DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
2170
2171 if (DVI) {
2172 DVI->setExpression(DIExpr);
2173 DVI->replaceVariableLocationOp(0u, NewAddress);
2174 } else {
2175 assert(DVR);
2176 DVR->setExpression(DIExpr);
2177 DVR->replaceVariableLocationOp(0u, NewAddress);
2178 }
2179}
2180
2181void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
2182 DIBuilder &Builder, int Offset) {
2185 findDbgValues(DbgUsers, AI, &DPUsers);
2186
2187 // Attempt to replace dbg.values that use this alloca.
2188 for (auto *DVI : DbgUsers)
2189 updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(),
2190 DVI->getExpression(), NewAllocaAddress, DVI,
2191 nullptr, Builder, Offset);
2192
2193 // Replace any DbgVariableRecords that use this alloca.
2194 for (DbgVariableRecord *DVR : DPUsers)
2195 updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2196 DVR->getExpression(), NewAllocaAddress, nullptr,
2197 DVR, Builder, Offset);
2198}
2199
2200/// Where possible to salvage debug information for \p I do so.
2201/// If not possible mark undef.
2202void llvm::salvageDebugInfo(Instruction &I) {
2203 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2204 SmallVector<DbgVariableRecord *, 1> DPUsers;
2205 findDbgUsers(DbgUsers, &I, &DPUsers);
2206 salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers);
2207}
2208
2209template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2210 Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2211 // Only instructions can be salvaged at the moment.
2212 if (!I)
2213 return;
2214
2215 assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2216 "address-expression shouldn't have fragment info");
2217
2218 // The address component of a dbg.assign cannot be variadic.
2219 uint64_t CurrentLocOps = 0;
2220 SmallVector<Value *, 4> AdditionalValues;
2221 SmallVector<uint64_t, 16> Ops;
2222 Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2223
2224 // Check if the salvage failed.
2225 if (!NewV)
2226 return;
2227
2228 DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
2229 Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2230 assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2231 "address-expression shouldn't have fragment info");
2232
2233 // Salvage succeeds if no additional values are required.
2234 if (AdditionalValues.empty()) {
2235 Assign->setAddress(NewV);
2236 Assign->setAddressExpression(SalvagedExpr);
2237 } else {
2238 Assign->setKillAddress();
2239 }
2240}
2241
2242void llvm::salvageDebugInfoForDbgValues(
2243 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
2244 ArrayRef<DbgVariableRecord *> DPUsers) {
2245 // These are arbitrarily chosen limits on the maximum number of values and the
2246 // maximum size of a debug expression we can salvage up to, used for
2247 // performance reasons.
2248 const unsigned MaxDebugArgs = 16;
2249 const unsigned MaxExpressionSize = 128;
2250 bool Salvaged = false;
2251
2252 for (auto *DII : DbgUsers) {
2253 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
2254 if (DAI->getAddress() == &I) {
2255 salvageDbgAssignAddress(DAI);
2256 Salvaged = true;
2257 }
2258 if (DAI->getValue() != &I)
2259 continue;
2260 }
2261
2262 // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
2263 // pointing out the value as a DWARF memory location description.
2264 bool StackValue = isa<DbgValueInst>(DII);
2265 auto DIILocation = DII->location_ops();
2266 assert(
2267 is_contained(DIILocation, &I) &&
2268 "DbgVariableIntrinsic must use salvaged instruction as its location");
2269 SmallVector<Value *, 4> AdditionalValues;
2270 // `I` may appear more than once in DII's location ops, and each use of `I`
2271 // must be updated in the DIExpression and potentially have additional
2272 // values added; thus we call salvageDebugInfoImpl for each `I` instance in
2273 // DIILocation.
2274 Value *Op0 = nullptr;
2275 DIExpression *SalvagedExpr = DII->getExpression();
2276 auto LocItr = find(DIILocation, &I);
2277 while (SalvagedExpr && LocItr != DIILocation.end()) {
2278 SmallVector<uint64_t, 16> Ops;
2279 unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
2280 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2281 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2282 if (!Op0)
2283 break;
2284 SalvagedExpr =
2285 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2286 LocItr = std::find(++LocItr, DIILocation.end(), &I);
2287 }
2288 // salvageDebugInfoImpl should fail on examining the first element of
2289 // DbgUsers, or none of them.
2290 if (!Op0)
2291 break;
2292
2293 DII->replaceVariableLocationOp(&I, Op0);
2294 bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
2295 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2296 DII->setExpression(SalvagedExpr);
2297 } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
2298 DII->getNumVariableLocationOps() + AdditionalValues.size() <=
2299 MaxDebugArgs) {
2300 DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2301 } else {
2302 // Do not salvage using DIArgList for dbg.declare, as it is not currently
2303 // supported in those instructions. Also do not salvage if the resulting
2304 // DIArgList would contain an unreasonably large number of values.
2305 DII->setKillLocation();
2306 }
2307 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
2308 Salvaged = true;
2309 }
2310 // Duplicate of above block for DbgVariableRecords.
2311 for (auto *DVR : DPUsers) {
2312 if (DVR->isDbgAssign()) {
2313 if (DVR->getAddress() == &I) {
2314 salvageDbgAssignAddress(DVR);
2315 Salvaged = true;
2316 }
2317 if (DVR->getValue() != &I)
2318 continue;
2319 }
2320
2321 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2322 // are implicitly pointing out the value as a DWARF memory location
2323 // description.
2324 bool StackValue =
2325 DVR->getType() != DbgVariableRecord::LocationType::Declare;
2326 auto DVRLocation = DVR->location_ops();
2327 assert(
2328 is_contained(DVRLocation, &I) &&
2329 "DbgVariableRecord must use salvaged instruction as its location");
2330 SmallVector<Value *, 4> AdditionalValues;
2331 // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2332 // must be updated in the DIExpression and potentially have additional
2333 // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2334 // DVRLocation.
2335 Value *Op0 = nullptr;
2336 DIExpression *SalvagedExpr = DVR->getExpression();
2337 auto LocItr = find(DVRLocation, &I);
2338 while (SalvagedExpr && LocItr != DVRLocation.end()) {
2339 SmallVector<uint64_t, 16> Ops;
2340 unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2341 uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2342 Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2343 if (!Op0)
2344 break;
2345 SalvagedExpr =
2346 DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2347 LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2348 }
2349 // salvageDebugInfoImpl should fail on examining the first element of
2350 // DbgUsers, or none of them.
2351 if (!Op0)
2352 break;
2353
2354 DVR->replaceVariableLocationOp(&I, Op0);
2355 bool IsValidSalvageExpr =
2356 SalvagedExpr->getNumElements() <= MaxExpressionSize;
2357 if (AdditionalValues.empty() && IsValidSalvageExpr) {
2358 DVR->setExpression(SalvagedExpr);
2359 } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2360 IsValidSalvageExpr &&
2361 DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2362 MaxDebugArgs) {
2363 DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2364 } else {
2365 // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2366 // currently only valid for stack value expressions.
2367 // Also do not salvage if the resulting DIArgList would contain an
2368 // unreasonably large number of values.
2369 DVR->setKillLocation();
2370 }
2371 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DVR << '\n');
2372 Salvaged = true;
2373 }
2374
2375 if (Salvaged)
2376 return;
2377
2378 for (auto *DII : DbgUsers)
2379 DII->setKillLocation();
2380
2381 for (auto *DVR : DPUsers)
2382 DVR->setKillLocation();
2383}
2384
2385Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
2386 uint64_t CurrentLocOps,
2387 SmallVectorImpl<uint64_t> &Opcodes,
2388 SmallVectorImpl<Value *> &AdditionalValues) {
2389 unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2390 // Rewrite a GEP into a DIExpression.
2391 MapVector<Value *, APInt> VariableOffsets;
2392 APInt ConstantOffset(BitWidth, 0);
2393 if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2394 return nullptr;
2395 if (!VariableOffsets.empty() && !CurrentLocOps) {
2396 Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2397 CurrentLocOps = 1;
2398 }
2399 for (const auto &Offset : VariableOffsets) {
2400 AdditionalValues.push_back(Offset.first);
2401 assert(Offset.second.isStrictlyPositive() &&
2402 "Expected strictly positive multiplier for offset.");
2403 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2404 Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2405 dwarf::DW_OP_plus});
2406 }
2407 DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2408 return GEP->getOperand(0);
2409}
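// For example (minimal sketch, assuming no location operands are in use yet):
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// salvages to the ops {DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_constu 4,
// DW_OP_mul, DW_OP_plus}, with %i added as an extra location operand, and
// returns %base.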
2410
2411uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
2412 switch (Opcode) {
2413 case Instruction::Add:
2414 return dwarf::DW_OP_plus;
2415 case Instruction::Sub:
2416 return dwarf::DW_OP_minus;
2417 case Instruction::Mul:
2418 return dwarf::DW_OP_mul;
2419 case Instruction::SDiv:
2420 return dwarf::DW_OP_div;
2421 case Instruction::SRem:
2422 return dwarf::DW_OP_mod;
2423 case Instruction::Or:
2424 return dwarf::DW_OP_or;
2425 case Instruction::And:
2426 return dwarf::DW_OP_and;
2427 case Instruction::Xor:
2428 return dwarf::DW_OP_xor;
2429 case Instruction::Shl:
2430 return dwarf::DW_OP_shl;
2431 case Instruction::LShr:
2432 return dwarf::DW_OP_shr;
2433 case Instruction::AShr:
2434 return dwarf::DW_OP_shra;
2435 default:
2436 // TODO: Salvage from each kind of binop we know about.
2437 return 0;
2438 }
2439}
2440
2441static void handleSSAValueOperands(uint64_t CurrentLocOps,
2442 SmallVectorImpl<uint64_t> &Opcodes,
2443 SmallVectorImpl<Value *> &AdditionalValues,
2444 Instruction *I) {
2445 if (!CurrentLocOps) {
2446 Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2447 CurrentLocOps = 1;
2448 }
2449 Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2450 AdditionalValues.push_back(I->getOperand(1));
2451}
2452
2453Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2454 SmallVectorImpl<uint64_t> &Opcodes,
2455 SmallVectorImpl<Value *> &AdditionalValues) {
2456 // Handle binary operations with constant integer operands as a special case.
2457 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2458 // Values wider than 64 bits cannot be represented within a DIExpression.
2459 if (ConstInt && ConstInt->getBitWidth() > 64)
2460 return nullptr;
2461
2462 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2463 // Push any Constant Int operand onto the expression stack.
2464 if (ConstInt) {
2465 uint64_t Val = ConstInt->getSExtValue();
2466 // Add or Sub Instructions with a constant operand can potentially be
2467 // simplified.
2468 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2469 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2470 DIExpression::appendOffset(Opcodes, Offset);
2471 return BI->getOperand(0);
2472 }
2473 Opcodes.append({dwarf::DW_OP_constu, Val});
2474 } else {
2475 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2476 }
2477
2478 // Add salvaged binary operator to expression stack, if it has a valid
2479 // representation in a DIExpression.
2480 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2481 if (!DwarfBinOp)
2482 return nullptr;
2483 Opcodes.push_back(DwarfBinOp);
2484 return BI->getOperand(0);
2485}
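// For example (minimal sketch):
//   %a = add i64 %x, 16
// folds into a plain DW_OP_plus_uconst 16 appended via appendOffset and
// returns %x, while a non-constant RHS instead becomes an extra
// DW_OP_LLVM_arg operand followed by the mapped DWARF opcode.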
2486
2487uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
2488 // The signedness of the operation is implicit in the typed stack, signed and
2489 // unsigned instructions map to the same DWARF opcode.
2490 switch (Pred) {
2491 case CmpInst::ICMP_EQ:
2492 return dwarf::DW_OP_eq;
2493 case CmpInst::ICMP_NE:
2494 return dwarf::DW_OP_ne;
2495 case CmpInst::ICMP_UGT:
2496 case CmpInst::ICMP_SGT:
2497 return dwarf::DW_OP_gt;
2498 case CmpInst::ICMP_UGE:
2499 case CmpInst::ICMP_SGE:
2500 return dwarf::DW_OP_ge;
2501 case CmpInst::ICMP_ULT:
2502 case CmpInst::ICMP_SLT:
2503 return dwarf::DW_OP_lt;
2504 case CmpInst::ICMP_ULE:
2505 case CmpInst::ICMP_SLE:
2506 return dwarf::DW_OP_le;
2507 default:
2508 return 0;
2509 }
2510}
2511
2512Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2513 SmallVectorImpl<uint64_t> &Opcodes,
2514 SmallVectorImpl<Value *> &AdditionalValues) {
2515 // Handle icmp operations with constant integer operands as a special case.
2516 auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2517 // Values wider than 64 bits cannot be represented within a DIExpression.
2518 if (ConstInt && ConstInt->getBitWidth() > 64)
2519 return nullptr;
2520 // Push any Constant Int operand onto the expression stack.
2521 if (ConstInt) {
2522 if (Icmp->isSigned())
2523 Opcodes.push_back(dwarf::DW_OP_consts);
2524 else
2525 Opcodes.push_back(dwarf::DW_OP_constu);
2526 uint64_t Val = ConstInt->getSExtValue();
2527 Opcodes.push_back(Val);
2528 } else {
2529 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2530 }
2531
2532 // Add salvaged binary operator to expression stack, if it has a valid
2533 // representation in a DIExpression.
2534 uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2535 if (!DwarfIcmpOp)
2536 return nullptr;
2537 Opcodes.push_back(DwarfIcmpOp);
2538 return Icmp->getOperand(0);
2539}
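// For example (minimal sketch):
//   %c = icmp ult i64 %x, 42
// appends {DW_OP_constu 42, DW_OP_lt} and returns %x; the salvaged
// expression then evaluates to 0 or 1, matching the original i1 result.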
2540
2541Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2542 SmallVectorImpl<uint64_t> &Ops,
2543 SmallVectorImpl<Value *> &AdditionalValues) {
2544 auto &M = *I.getModule();
2545 auto &DL = M.getDataLayout();
2546
2547 if (auto *CI = dyn_cast<CastInst>(&I)) {
2548 Value *FromValue = CI->getOperand(0);
2549 // No-op casts are irrelevant for debug info.
2550 if (CI->isNoopCast(DL)) {
2551 return FromValue;
2552 }
2553
2554 Type *Type = CI->getType();
2555 if (Type->isPointerTy())
2556 Type = DL.getIntPtrType(Type);
2557 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2558 if (Type->isVectorTy() ||
2559 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2560 isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2561 return nullptr;
2562
2563 llvm::Type *FromType = FromValue->getType();
2564 if (FromType->isPointerTy())
2565 FromType = DL.getIntPtrType(FromType);
2566
2567 unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2568 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2569
2570 auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2571 isa<SExtInst>(&I));
2572 Ops.append(ExtOps.begin(), ExtOps.end());
2573 return FromValue;
2574 }
2575
2576 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2577 return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2578 if (auto *BI = dyn_cast<BinaryOperator>(&I))
2579 return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2580 if (auto *IC = dyn_cast<ICmpInst>(&I))
2581 return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2582
2583 // *Not* to do: we should not attempt to salvage load instructions,
2584 // because the validity and lifetime of a dbg.value containing
2585 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2586 return nullptr;
2587}
2588
2589/// A replacement for a dbg.value expression.
2590using DbgValReplacement = std::optional<DIExpression *>;
2591
2592/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2593/// possibly moving/undefing users to prevent use-before-def. Returns true if
2594/// changes are made.
2595static bool rewriteDebugUsers(
2596 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2597 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
2598 function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2599 // Find debug users of From.
2602 findDbgUsers(Users, &From, &DPUsers);
2603 if (Users.empty() && DPUsers.empty())
2604 return false;
2605
2606 // Prevent use-before-def of To.
2607 bool Changed = false;
2608
2609 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2610 SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2611 if (isa<Instruction>(&To)) {
2612 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2613
2614 for (auto *DII : Users) {
2615 // It's common to see a debug user between From and DomPoint. Move it
2616 // after DomPoint to preserve the variable update without any reordering.
2617 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2618 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
2619 DII->moveAfter(&DomPoint);
2620 Changed = true;
2621
2622 // Users which otherwise aren't dominated by the replacement value must
2623 // be salvaged or deleted.
2624 } else if (!DT.dominates(&DomPoint, DII)) {
2625 UndefOrSalvage.insert(DII);
2626 }
2627 }
2628
2629 // DbgVariableRecord implementation of the above.
2630 for (auto *DVR : DPUsers) {
2631 Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2632 Instruction *NextNonDebug = MarkedInstr;
2633 // The next instruction might still be a dbg.declare, skip over it.
2634 if (isa<DbgVariableIntrinsic>(NextNonDebug))
2635 NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
2636
2637 if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2638 LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2639 DVR->removeFromParent();
2640 // Ensure there's a marker.
2641 DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2642 Changed = true;
2643 } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2644 UndefOrSalvageDVR.insert(DVR);
2645 }
2646 }
2647 }
2648
2649 // Update debug users without use-before-def risk.
2650 for (auto *DII : Users) {
2651 if (UndefOrSalvage.count(DII))
2652 continue;
2653
2654 DbgValReplacement DVRepl = RewriteExpr(*DII);
2655 if (!DVRepl)
2656 continue;
2657
2658 DII->replaceVariableLocationOp(&From, &To);
2659 DII->setExpression(*DVRepl);
2660 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
2661 Changed = true;
2662 }
2663 for (auto *DVR : DPUsers) {
2664 if (UndefOrSalvageDVR.count(DVR))
2665 continue;
2666
2667 DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2668 if (!DVRepl)
2669 continue;
2670
2671 DVR->replaceVariableLocationOp(&From, &To);
2672 DVR->setExpression(*DVRepl);
2673 LLVM_DEBUG(dbgs() << "REWRITE: " << *DVR << '\n');
2674 Changed = true;
2675 }
2676
2677 if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
2678 // Try to salvage the remaining debug users.
2679 salvageDebugInfo(From);
2680 Changed = true;
2681 }
2682
2683 return Changed;
2684}
2685
2686/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2687/// losslessly preserve the bits and semantics of the value. This predicate is
2688/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2689///
2690/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2691/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2692/// and also does not allow lossless pointer <-> integer conversions.
2693static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2694 Type *ToTy) {
2695 // Trivially compatible types.
2696 if (FromTy == ToTy)
2697 return true;
2698
2699 // Handle compatible pointer <-> integer conversions.
2700 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2701 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2702 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2703 !DL.isNonIntegralPointerType(ToTy);
2704 return SameSize && LosslessConversion;
2705 }
2706
2707 // TODO: This is not exhaustive.
2708 return false;
2709}
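// For example (minimal sketch, assuming a 64-bit integral address space):
// i64 <-> ptr is treated as lossless here, whereas i64 <-> <2 x i32> or a
// pointer in a non-integral address space is not.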
2710
2711bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2712 Instruction &DomPoint, DominatorTree &DT) {
2713 // Exit early if From has no debug users.
2714 if (!From.isUsedByMetadata())
2715 return false;
2716
2717 assert(&From != &To && "Can't replace something with itself");
2718
2719 Type *FromTy = From.getType();
2720 Type *ToTy = To.getType();
2721
2722 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2723 return DII.getExpression();
2724 };
2725 auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2726 return DVR.getExpression();
2727 };
2728
2729 // Handle no-op conversions.
2730 Module &M = *From.getModule();
2731 const DataLayout &DL = M.getDataLayout();
2732 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2733 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2734
2735 // Handle integer-to-integer widening and narrowing.
2736 // FIXME: Use DW_OP_convert when it's available everywhere.
2737 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2738 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2739 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2740 assert(FromBits != ToBits && "Unexpected no-op conversion");
2741
2742 // When the width of the result grows, assume that a debugger will only
2743 // access the low `FromBits` bits when inspecting the source variable.
2744 if (FromBits < ToBits)
2745 return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);
2746
2747 // The width of the result has shrunk. Use sign/zero extension to describe
2748 // the source variable's high bits.
2749 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2750 DILocalVariable *Var = DII.getVariable();
2751
2752 // Without knowing signedness, sign/zero extension isn't possible.
2753 auto Signedness = Var->getSignedness();
2754 if (!Signedness)
2755 return std::nullopt;
2756
2757 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2758 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2759 Signed);
2760 };
2761 // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
2762 // than on dbg.value intrinsics.
2763 auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2764 DILocalVariable *Var = DVR.getVariable();
2765
2766 // Without knowing signedness, sign/zero extension isn't possible.
2767 auto Signedness = Var->getSignedness();
2768 if (!Signedness)
2769 return std::nullopt;
2770
2771 bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2772 return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2773 Signed);
2774 };
2775 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
2776 SignOrZeroExtDVR);
2777 }
2778
2779 // TODO: Floating-point conversions, vectors.
2780 return false;
2781}
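// For example (minimal sketch): replacing an i64 %wide with its i32
// truncation %narrow rewrites
//   dbg.value(i64 %wide, !var, !DIExpression())
// into
//   dbg.value(i32 %narrow, !var, !DIExpression(DW_OP_LLVM_convert, 32,
//             DW_ATE_signed, DW_OP_LLVM_convert, 64, DW_ATE_signed))
// provided !var's basic type records its signedness; otherwise that user is
// skipped.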
2782
2783bool llvm::handleUnreachableTerminator(
2784 Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2785 bool Changed = false;
2786 // RemoveDIs: erase debug-info on this instruction manually.
2787 I->dropDbgRecords();
2788 for (Use &U : I->operands()) {
2789 Value *Op = U.get();
2790 if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2791 U.set(PoisonValue::get(Op->getType()));
2792 PoisonedValues.push_back(Op);
2793 Changed = true;
2794 }
2795 }
2796
2797 return Changed;
2798}
2799
2800std::pair<unsigned, unsigned>
2801llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2802 unsigned NumDeadInst = 0;
2803 unsigned NumDeadDbgInst = 0;
2804 // Delete the instructions backwards, which reduces the number of def-use
2805 // and use-def chains that have to be updated.
2806 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2809
2810 while (EndInst != &BB->front()) {
2811 // Delete the next to last instruction.
2812 Instruction *Inst = &*--EndInst->getIterator();
2813 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2814 Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2815 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2816 // EHPads can't have DbgVariableRecords attached to them, but it might be
2817 // possible for things with token type.
2818 Inst->dropDbgRecords();
2819 EndInst = Inst;
2820 continue;
2821 }
2822 if (isa<DbgInfoIntrinsic>(Inst))
2823 ++NumDeadDbgInst;
2824 else
2825 ++NumDeadInst;
2826 // RemoveDIs: erasing debug-info must be done manually.
2827 Inst->dropDbgRecords();
2828 Inst->eraseFromParent();
2829 }
2830 return {NumDeadInst, NumDeadDbgInst};
2831}
2832
2833unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2834 DomTreeUpdater *DTU,
2835 MemorySSAUpdater *MSSAU) {
2836 BasicBlock *BB = I->getParent();
2837
2838 if (MSSAU)
2839 MSSAU->changeToUnreachable(I);
2840
2841 SmallSet<BasicBlock *, 8> UniqueSuccessors;
2842
2843 // Loop over all of the successors, removing BB's entry from any PHI
2844 // nodes.
2845 for (BasicBlock *Successor : successors(BB)) {
2846 Successor->removePredecessor(BB, PreserveLCSSA);
2847 if (DTU)
2848 UniqueSuccessors.insert(Successor);
2849 }
2850 auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2851 UI->setDebugLoc(I->getDebugLoc());
2852
2853 // All instructions after this are dead.
2854 unsigned NumInstrsRemoved = 0;
2855 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2856 while (BBI != BBE) {
2857 if (!BBI->use_empty())
2858 BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2859 BBI++->eraseFromParent();
2860 ++NumInstrsRemoved;
2861 }
2862 if (DTU) {
2863 SmallVector<DominatorTree::UpdateType, 8> Updates;
2864 Updates.reserve(UniqueSuccessors.size());
2865 for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2866 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2867 DTU->applyUpdates(Updates);
2868 }
2870 return NumInstrsRemoved;
2871}
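// For example (minimal sketch): calling changeToUnreachable on the store in
//   store i32 0, ptr null
//   br label %next
// strips the block from %next's PHI nodes, deletes both instructions, and
// leaves a single `unreachable` terminator in their place.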
2872
2873CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2874 SmallVector<Value *, 8> Args(II->args());
2875 SmallVector<OperandBundleDef, 1> OpBundles;
2876 II->getOperandBundlesAsDefs(OpBundles);
2877 CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2878 II->getCalledOperand(), Args, OpBundles);
2879 NewCall->setCallingConv(II->getCallingConv());
2880 NewCall->setAttributes(II->getAttributes());
2881 NewCall->setDebugLoc(II->getDebugLoc());
2882 NewCall->copyMetadata(*II);
2883
2884 // If the invoke had profile metadata, try converting them for CallInst.
2885 uint64_t TotalWeight;
2886 if (NewCall->extractProfTotalWeight(TotalWeight)) {
2887 // Set the total weight if it fits into i32, otherwise reset.
2888 MDBuilder MDB(NewCall->getContext());
2889 auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2890 ? nullptr
2891 : MDB.createBranchWeights({uint32_t(TotalWeight)});
2892 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2893 }
2894
2895 return NewCall;
2896}
2897
2898// changeToCall - Convert the specified invoke into a normal call.
2899CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2900 CallInst *NewCall = createCallMatchingInvoke(II);
2901 NewCall->takeName(II);
2902 NewCall->insertBefore(II);
2903 II->replaceAllUsesWith(NewCall);
2904
2905 // Follow the call by a branch to the normal destination.
2906 BasicBlock *NormalDestBB = II->getNormalDest();
2907 BranchInst::Create(NormalDestBB, II->getIterator());
2908
2909 // Update PHI nodes in the unwind destination
2910 BasicBlock *BB = II->getParent();
2911 BasicBlock *UnwindDestBB = II->getUnwindDest();
2912 UnwindDestBB->removePredecessor(BB);
2913 II->eraseFromParent();
2914 if (DTU)
2915 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2916 return NewCall;
2917}
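// For example (minimal sketch): changeToCall turns
//   invoke void @f() to label %normal unwind label %lpad
// into
//   call void @f()
//   br label %normal
// and removes the block from %lpad's predecessors (with a DomTree edge
// deletion if a DomTreeUpdater is supplied).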
2918
2919BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2920 BasicBlock *UnwindEdge,
2921 DomTreeUpdater *DTU) {
2922 BasicBlock *BB = CI->getParent();
2923
2924 // Convert this function call into an invoke instruction. First, split the
2925 // basic block.
2926 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2927 CI->getName() + ".noexc");
2928
2929 // Delete the unconditional branch inserted by SplitBlock
2930 BB->back().eraseFromParent();
2931
2932 // Create the new invoke instruction.
2933 SmallVector<Value *, 8> InvokeArgs(CI->args());
2934 SmallVector<OperandBundleDef, 1> OpBundles;
2935
2936 CI->getOperandBundlesAsDefs(OpBundles);
2937
2938 // Note: we're round tripping operand bundles through memory here, and that
2939 // can potentially be avoided with a cleverer API design that we do not have
2940 // as of this time.
2941
2942 InvokeInst *II =
2943 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2944 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2945 II->setDebugLoc(CI->getDebugLoc());
2946 II->setCallingConv(CI->getCallingConv());
2947 II->setAttributes(CI->getAttributes());
2948 II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2949
2950 if (DTU)
2951 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2952
2953 // Make sure that anything using the call now uses the invoke! This also
2954 // updates the CallGraph if present, because it uses a WeakTrackingVH.
2955 CI->replaceAllUsesWith(II);
2956
2957 // Delete the original call
2958 Split->front().eraseFromParent();
2959 return Split;
2960}
2961
2962static bool markAliveBlocks(Function &F,
2963 SmallPtrSetImpl<BasicBlock *> &Reachable,
2964 DomTreeUpdater *DTU = nullptr) {
2965 SmallVector<BasicBlock *, 128> Worklist;
2966 BasicBlock *BB = &F.front();
2967 Worklist.push_back(BB);
2968 Reachable.insert(BB);
2969 bool Changed = false;
2970 do {
2971 BB = Worklist.pop_back_val();
2972
2973 // Do a quick scan of the basic block, turning any obviously unreachable
2974 // instructions into LLVM unreachable insts. The instruction combining pass
2975 // canonicalizes unreachable insts into stores to null or undef.
2976 for (Instruction &I : *BB) {
2977 if (auto *CI = dyn_cast<CallInst>(&I)) {
2978 Value *Callee = CI->getCalledOperand();
2979 // Handle intrinsic calls.
2980 if (Function *F = dyn_cast<Function>(Callee)) {
2981 auto IntrinsicID = F->getIntrinsicID();
2982 // Assumptions that are known to be false are equivalent to
2983 // unreachable. Also, if the condition is undefined, then we make the
2984 // choice most beneficial to the optimizer, and choose that to also be
2985 // unreachable.
2986 if (IntrinsicID == Intrinsic::assume) {
2987 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2988 // Don't insert a call to llvm.trap right before the unreachable.
2989 changeToUnreachable(CI, false, DTU);
2990 Changed = true;
2991 break;
2992 }
2993 } else if (IntrinsicID == Intrinsic::experimental_guard) {
2994 // A call to the guard intrinsic bails out of the current
2995 // compilation unit if the predicate passed to it is false. If the
2996 // predicate is a constant false, then we know the guard will bail
2997 // out of the current compile unconditionally, so all code following
2998 // it is dead.
2999 //
3000 // Note: unlike in llvm.assume, it is not "obviously profitable" for
3001 // guards to treat `undef` as `false` since a guard on `undef` can
3002 // still be useful for widening.
3003 if (match(CI->getArgOperand(0), m_Zero()))
3004 if (!isa<UnreachableInst>(CI->getNextNode())) {
3005 changeToUnreachable(CI->getNextNode(), false, DTU);
3006 Changed = true;
3007 break;
3008 }
3009 }
3010 } else if ((isa<ConstantPointerNull>(Callee) &&
3011 !NullPointerIsDefined(CI->getFunction(),
3012 cast<PointerType>(Callee->getType())
3013 ->getAddressSpace())) ||
3014 isa<UndefValue>(Callee)) {
3015 changeToUnreachable(CI, false, DTU);
3016 Changed = true;
3017 break;
3018 }
3019 if (CI->doesNotReturn() && !CI->isMustTailCall()) {
3020 // If we found a call to a no-return function, insert an unreachable
3021 // instruction after it. Make sure there isn't *already* one there
3022 // though.
3023 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
3024 // Don't insert a call to llvm.trap right before the unreachable.
3025 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
3026 Changed = true;
3027 }
3028 break;
3029 }
3030 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
3031 // Store to undef and store to null are undefined and used to signal
3032 // that they should be changed to unreachable by passes that can't
3033 // modify the CFG.
3034
3035 // Don't touch volatile stores.
3036 if (SI->isVolatile()) continue;
3037
3038 Value *Ptr = SI->getOperand(1);
3039
3040 if (isa<UndefValue>(Ptr) ||
3041 (isa<ConstantPointerNull>(Ptr) &&
3042 !NullPointerIsDefined(SI->getFunction(),
3043 SI->getPointerAddressSpace()))) {
3044 changeToUnreachable(SI, false, DTU);
3045 Changed = true;
3046 break;
3047 }
3048 }
3049 }
3050
3051 Instruction *Terminator = BB->getTerminator();
3052 if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
3053 // Turn invokes that call 'nounwind' functions into ordinary calls.
3054 Value *Callee = II->getCalledOperand();
3055 if ((isa<ConstantPointerNull>(Callee) &&
3056 !NullPointerIsDefined(BB->getParent())) ||
3057 isa<UndefValue>(Callee)) {
3058 changeToUnreachable(II, false, DTU);
3059 Changed = true;
3060 } else {
3061 if (II->doesNotReturn() &&
3062 !isa<UnreachableInst>(II->getNormalDest()->front())) {
3063 // If we found an invoke of a no-return function,
3064 // create a new empty basic block with an `unreachable` terminator,
3065 // and set it as the normal destination for the invoke,
3066 // unless that is already the case.
3067 // Note that the original normal destination could have other uses.
3068 BasicBlock *OrigNormalDest = II->getNormalDest();
3069 OrigNormalDest->removePredecessor(II->getParent());
3070 LLVMContext &Ctx = II->getContext();
3071 BasicBlock *UnreachableNormalDest = BasicBlock::Create(
3072 Ctx, OrigNormalDest->getName() + ".unreachable",
3073 II->getFunction(), OrigNormalDest);
3074 new UnreachableInst(Ctx, UnreachableNormalDest);
3075 II->setNormalDest(UnreachableNormalDest);
3076 if (DTU)
3077 DTU->applyUpdates(
3078 {{DominatorTree::Delete, BB, OrigNormalDest},
3079 {DominatorTree::Insert, BB, UnreachableNormalDest}});
3080 Changed = true;
3081 }
3082 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
3083 if (II->use_empty() && !II->mayHaveSideEffects()) {
3084 // Just branch to the normal destination instead.
3085 BasicBlock *NormalDestBB = II->getNormalDest();
3086 BasicBlock *UnwindDestBB = II->getUnwindDest();
3087 BranchInst::Create(NormalDestBB, II->getIterator());
3088 UnwindDestBB->removePredecessor(II->getParent());
3089 II->eraseFromParent();
3090 if (DTU)
3091 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
3092 } else
3093 changeToCall(II, DTU);
3094 Changed = true;
3095 }
3096 }
3097 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
3098 // Remove catchpads which cannot be reached.
3099 struct CatchPadDenseMapInfo {
3100 static CatchPadInst *getEmptyKey() {
3101 return DenseMapInfo<CatchPadInst *>::getEmptyKey();
3102 }
3103
3104 static CatchPadInst *getTombstoneKey() {
3105 return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
3106 }
3107
3108 static unsigned getHashValue(CatchPadInst *CatchPad) {
3109 return static_cast<unsigned>(hash_combine_range(
3110 CatchPad->value_op_begin(), CatchPad->value_op_end()));
3111 }
3112
3113 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
3114 if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
3115 RHS == getEmptyKey() || RHS == getTombstoneKey())
3116 return LHS == RHS;
3117 return LHS->isIdenticalTo(RHS);
3118 }
3119 };
3120
3121 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
3122 // Set of unique CatchPads.
3123 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
3124 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
3125 HandlerSet;
3126 detail::DenseSetEmpty Empty;
3127 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
3128 E = CatchSwitch->handler_end();
3129 I != E; ++I) {
3130 BasicBlock *HandlerBB = *I;
3131 if (DTU)
3132 ++NumPerSuccessorCases[HandlerBB];
3133 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
3134 if (!HandlerSet.insert({CatchPad, Empty}).second) {
3135 if (DTU)
3136 --NumPerSuccessorCases[HandlerBB];
3137 CatchSwitch->removeHandler(I);
3138 --I;
3139 --E;
3140 Changed = true;
3141 }
3142 }
3143 if (DTU) {
3144 std::vector<DominatorTree::UpdateType> Updates;
3145 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
3146 if (I.second == 0)
3147 Updates.push_back({DominatorTree::Delete, BB, I.first});
3148 DTU->applyUpdates(Updates);
3149 }
3150 }
3151
3152 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
3153 for (BasicBlock *Successor : successors(BB))
3154 if (Reachable.insert(Successor).second)
3155 Worklist.push_back(Successor);
3156 } while (!Worklist.empty());
3157 return Changed;
3158}
3159
3160Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
3161 Instruction *TI = BB->getTerminator();
3162
3163 if (auto *II = dyn_cast<InvokeInst>(TI))
3164 return changeToCall(II, DTU);
3165
3166 Instruction *NewTI;
3167 BasicBlock *UnwindDest;
3168
3169 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3170 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
3171 UnwindDest = CRI->getUnwindDest();
3172 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
3173 auto *NewCatchSwitch = CatchSwitchInst::Create(
3174 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
3175 CatchSwitch->getName(), CatchSwitch->getIterator());
3176 for (BasicBlock *PadBB : CatchSwitch->handlers())
3177 NewCatchSwitch->addHandler(PadBB);
3178
3179 NewTI = NewCatchSwitch;
3180 UnwindDest = CatchSwitch->getUnwindDest();
3181 } else {
3182 llvm_unreachable("Could not find unwind successor");
3183 }
3184
3185 NewTI->takeName(TI);
3186 NewTI->setDebugLoc(TI->getDebugLoc());
3187 UnwindDest->removePredecessor(BB);
3188 TI->replaceAllUsesWith(NewTI);
3189 TI->eraseFromParent();
3190 if (DTU)
3191 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
3192 return NewTI;
3193}
3194
3195/// removeUnreachableBlocks - Remove blocks that are not reachable, even
3196/// if they are in a dead cycle. Return true if a change was made, false
3197/// otherwise.
3198bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3199 MemorySSAUpdater *MSSAU) {
3200 SmallPtrSet<BasicBlock *, 16> Reachable;
3201 bool Changed = markAliveBlocks(F, Reachable, DTU);
3202
3203 // If there are unreachable blocks in the CFG...
3204 if (Reachable.size() == F.size())
3205 return Changed;
3206
3207 assert(Reachable.size() < F.size());
3208
3209 // Are there any blocks left to actually delete?
3210 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3211 for (BasicBlock &BB : F) {
3212 // Skip reachable basic blocks
3213 if (Reachable.count(&BB))
3214 continue;
3215 // Skip already-deleted blocks
3216 if (DTU && DTU->isBBPendingDeletion(&BB))
3217 continue;
3218 BlocksToRemove.insert(&BB);
3219 }
3220
3221 if (BlocksToRemove.empty())
3222 return Changed;
3223
3224 Changed = true;
3225 NumRemoved += BlocksToRemove.size();
3226
3227 if (MSSAU)
3228 MSSAU->removeBlocks(BlocksToRemove);
3229
3230 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
3231
3232 return Changed;
3233}
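// For example (illustrative usage, updater names assumed): a pass holding a
// DomTreeUpdater would typically call
//   bool Changed = removeUnreachableBlocks(F, &DTU);
// and let the pending DomTree updates flush through the updater.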
3234
3235void llvm::combineMetadata(Instruction *K, const Instruction *J,
3236 ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
3237 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
3238 K->dropUnknownNonDebugMetadata(KnownIDs);
3239 K->getAllMetadataOtherThanDebugLoc(Metadata);
3240 for (const auto &MD : Metadata) {
3241 unsigned Kind = MD.first;
3242 MDNode *JMD = J->getMetadata(Kind);
3243 MDNode *KMD = MD.second;
3244
3245 switch (Kind) {
3246 default:
3247 K->setMetadata(Kind, nullptr); // Remove unknown metadata
3248 break;
3249 case LLVMContext::MD_dbg:
3250 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
3251 case LLVMContext::MD_DIAssignID:
3252 K->mergeDIAssignID(J);
3253 break;
3254 case LLVMContext::MD_tbaa:
3255 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
3256 break;
3257 case LLVMContext::MD_alias_scope:
3258 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
3259 break;
3260 case LLVMContext::MD_noalias:
3261 case LLVMContext::MD_mem_parallel_loop_access:
3262 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
3263 break;
3264 case LLVMContext::MD_access_group:
3265 K->setMetadata(LLVMContext::MD_access_group,
3266 intersectAccessGroups(K, J));
3267 break;
3268 case LLVMContext::MD_range:
3269 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3270 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
3271 break;
3272 case LLVMContext::MD_fpmath:
3273 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
3274 break;
3275 case LLVMContext::MD_invariant_load:
3276 // If K moves, only set the !invariant.load if it is present in both
3277 // instructions.
3278 if (DoesKMove)
3279 K->setMetadata(Kind, JMD);
3280 break;
3281 case LLVMContext::MD_nonnull:
3282 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3283 K->setMetadata(Kind, JMD);
3284 break;
3285 case LLVMContext::MD_invariant_group:
3286 // Preserve !invariant.group in K.
3287 break;
3288 case LLVMContext::MD_mmra:
3289 // Combine MMRAs
3290 break;
3291 case LLVMContext::MD_align:
3292 if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3293 K->setMetadata(
3294 Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3295 break;
3296 case LLVMContext::MD_dereferenceable:
3297 case LLVMContext::MD_dereferenceable_or_null:
3298 if (DoesKMove)
3299 K->setMetadata(Kind,
3300 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3301 break;
3302 case LLVMContext::MD_preserve_access_index:
3303 // Preserve !preserve.access.index in K.
3304 break;
3305 case LLVMContext::MD_noundef:
3306 // If K does move, keep noundef if it is present in both instructions.
3307 if (DoesKMove)
3308 K->setMetadata(Kind, JMD);
3309 break;
3310 case LLVMContext::MD_nontemporal:
3311 // Preserve !nontemporal if it is present on both instructions.
3312 K->setMetadata(Kind, JMD);
3313 break;
3314 case LLVMContext::MD_prof:
3315 if (DoesKMove)
3316 K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
3317 break;
3318 }
3319 }
3320 // Set !invariant.group from J if J has it. If both instructions have it
3321 // then we will just pick it from J - even when they are different.
3322 // Also make sure that K is load or store - f.e. combining bitcast with load
3323 // could produce bitcast with invariant.group metadata, which is invalid.
3324 // FIXME: we should try to preserve both invariant.group md if they are
3325 // different, but right now instruction can only have one invariant.group.
3326 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3327 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3328 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3329
3330 // Merge MMRAs.
3331 // This is handled separately because we also want to handle cases where K
3332 // doesn't have tags but J does.
3333 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3334 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3335 if (JMMRA || KMMRA) {
3336 K->setMetadata(LLVMContext::MD_mmra,
3337 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3338 }
3339}
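// --- Editorial illustration, not part of Local.cpp ----------------------
// A hedged sketch of the merge semantics above when K replaces both
// instructions (DoesKMove == true); the ranges are invented for the example:
//
//   J: %a = load i32, ptr %p, !range !{i32 0, i32 10}
//   K: %b = load i32, ptr %p, !range !{i32 5, i32 20}
//     ; after combineMetadata(K, J, KnownIDs, /*DoesKMove=*/true), roughly
//   K: %b = load i32, ptr %p, !range !{i32 0, i32 20}
//
// Only kinds that K itself carries are visited (the loop walks K's metadata),
// anything not in KnownIDs is dropped up front, and kinds such as !noalias
// are intersected rather than widened.
// ------------------------------------------------------------------------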
3340
3341void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3342 bool KDominatesJ) {
3343 unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
3344 LLVMContext::MD_alias_scope,
3345 LLVMContext::MD_noalias,
3346 LLVMContext::MD_range,
3347 LLVMContext::MD_fpmath,
3348 LLVMContext::MD_invariant_load,
3349 LLVMContext::MD_nonnull,
3350 LLVMContext::MD_invariant_group,
3351 LLVMContext::MD_align,
3352 LLVMContext::MD_dereferenceable,
3353 LLVMContext::MD_dereferenceable_or_null,
3354 LLVMContext::MD_access_group,
3355 LLVMContext::MD_preserve_access_index,
3356 LLVMContext::MD_prof,
3357 LLVMContext::MD_nontemporal,
3358 LLVMContext::MD_noundef,
3359 LLVMContext::MD_mmra};
3360 combineMetadata(K, J, KnownIDs, KDominatesJ);
3361}
3362
3363void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3364 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3365 Source.getAllMetadata(MD);
3366 MDBuilder MDB(Dest.getContext());
3367 Type *NewType = Dest.getType();
3368 const DataLayout &DL = Source.getModule()->getDataLayout();
3369 for (const auto &MDPair : MD) {
3370 unsigned ID = MDPair.first;
3371 MDNode *N = MDPair.second;
3372 // Note, essentially every kind of metadata should be preserved here! This
3373 // routine is supposed to clone a load instruction changing *only its type*.
3374 // The only metadata it makes sense to drop is metadata which is invalidated
3375 // when the pointer type changes. This should essentially never be the case
3376 // in LLVM, but we explicitly switch over only known metadata to be
3377 // conservatively correct. If you are adding metadata to LLVM which pertains
3378 // to loads, you almost certainly want to add it here.
3379 switch (ID) {
3380 case LLVMContext::MD_dbg:
3381 case LLVMContext::MD_tbaa:
3382 case LLVMContext::MD_prof:
3383 case LLVMContext::MD_fpmath:
3384 case LLVMContext::MD_tbaa_struct:
3385 case LLVMContext::MD_invariant_load:
3386 case LLVMContext::MD_alias_scope:
3387 case LLVMContext::MD_noalias:
3388 case LLVMContext::MD_nontemporal:
3389 case LLVMContext::MD_mem_parallel_loop_access:
3390 case LLVMContext::MD_access_group:
3391 case LLVMContext::MD_noundef:
3392 // All of these directly apply.
3393 Dest.setMetadata(ID, N);
3394 break;
3395
3396 case LLVMContext::MD_nonnull:
3397 copyNonnullMetadata(Source, N, Dest);
3398 break;
3399
3400 case LLVMContext::MD_align:
3401 case LLVMContext::MD_dereferenceable:
3402 case LLVMContext::MD_dereferenceable_or_null:
3403 // These only directly apply if the new type is also a pointer.
3404 if (NewType->isPointerTy())
3405 Dest.setMetadata(ID, N);
3406 break;
3407
3408 case LLVMContext::MD_range:
3409 copyRangeMetadata(DL, Source, N, Dest);
3410 break;
3411 }
3412 }
3413}
3414
3415void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3416 auto *ReplInst = dyn_cast<Instruction>(Repl);
3417 if (!ReplInst)
3418 return;
3419
3420 // Patch the replacement so that it is not more restrictive than the value
3421 // being replaced.
3422 WithOverflowInst *UnusedWO;
3423 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3424 // overflowing binary operator, nuw/nsw flags may no longer hold.
3425 if (isa<OverflowingBinaryOperator>(ReplInst) &&
3426 match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3427 ReplInst->dropPoisonGeneratingFlags();
3428 // Note that if 'I' is a load being replaced by some operation,
3429 // for example, by an arithmetic operation, then andIRFlags()
3430 // would just erase all math flags from the original arithmetic
3431 // operation, which is clearly not wanted and not needed.
3432 else if (!isa<LoadInst>(I))
3433 ReplInst->andIRFlags(I);
3434
3435 // FIXME: If both the original and replacement value are part of the
3436 // same control-flow region (meaning that the execution of one
3437 // guarantees the execution of the other), then we can combine the
3438 // noalias scopes here and do better than the general conservative
3439 // answer used in combineMetadata().
3440
3441 // In general, GVN unifies expressions over different control-flow
3442 // regions, and so we need a conservative combination of the noalias
3443 // scopes.
3444 combineMetadataForCSE(ReplInst, I, false);
3445}
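// --- Editorial illustration, not part of Local.cpp ----------------------
// Why the poison-generating flags are dropped above, sketched in IR with
// invented value names: if GVN replaces
//
//   %wo  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %res = extractvalue { i32, i1 } %wo, 0
//
// with an existing `%add = add nsw i32 %a, %b`, then `%add` must lose its
// `nsw`: the intrinsic's first result is well defined on overflow, while
// `add nsw` would be poison. For other replacements the flags are merely
// intersected via andIRFlags(), except when I is a load.
// ------------------------------------------------------------------------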
3446
3447template <typename RootType, typename DominatesFn>
3448static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3449 const RootType &Root,
3450 const DominatesFn &Dominates) {
3451 assert(From->getType() == To->getType());
3452
3453 unsigned Count = 0;
3454 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3455 if (!Dominates(Root, U))
3456 continue;
3457 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3458 From->printAsOperand(dbgs());
3459 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3460 U.set(To);
3461 ++Count;
3462 }
3463 return Count;
3464}
3465
3466unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
3467 assert(From->getType() == To->getType());
3468 auto *BB = From->getParent();
3469 unsigned Count = 0;
3470
3471 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3472 auto *I = cast<Instruction>(U.getUser());
3473 if (I->getParent() == BB)
3474 continue;
3475 U.set(To);
3476 ++Count;
3477 }
3478 return Count;
3479}
3480
3481unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3482 DominatorTree &DT,
3483 const BasicBlockEdge &Root) {
3484 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
3485 return DT.dominates(Root, U);
3486 };
3487 return ::replaceDominatedUsesWith(From, To, Root, Dominates);
3488}
3489
3490unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
3491 DominatorTree &DT,
3492 const BasicBlock *BB) {
3493 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
3494 return DT.dominates(BB, U);
3495 };
3496 return ::replaceDominatedUsesWith(From, To, BB, Dominates);
3497}
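// --- Editorial illustration, not part of Local.cpp ----------------------
// Hedged usage sketch: after proving From == To on the true edge of a
// conditional branch, a pass can rewrite only the dominated uses (the
// variable names are invented):
//
//   BasicBlockEdge TrueEdge(BranchBB, TrueSucc);
//   unsigned NumReplaced = replaceDominatedUsesWith(From, To, DT, TrueEdge);
//
// Uses that the edge does not dominate (including uses inside BranchBB
// itself) are left alone, unlike a plain replaceAllUsesWith().
// ------------------------------------------------------------------------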
3498
3499bool llvm::callsGCLeafFunction(const CallBase *Call,
3500 const TargetLibraryInfo &TLI) {
3501 // Check if the function is specifically marked as a gc leaf function.
3502 if (Call->hasFnAttr("gc-leaf-function"))
3503 return true;
3504 if (const Function *F = Call->getCalledFunction()) {
3505 if (F->hasFnAttribute("gc-leaf-function"))
3506 return true;
3507
3508 if (auto IID = F->getIntrinsicID()) {
3509 // Most LLVM intrinsics do not take safepoints.
3510 return IID != Intrinsic::experimental_gc_statepoint &&
3511 IID != Intrinsic::experimental_deoptimize &&
3512 IID != Intrinsic::memcpy_element_unordered_atomic &&
3513 IID != Intrinsic::memmove_element_unordered_atomic;
3514 }
3515 }
3516
3517 // Lib calls can be materialized by some passes, and won't be
3518 // marked as 'gc-leaf-function.' All available Libcalls are
3519 // GC-leaf.
3520 LibFunc LF;
3521 if (TLI.getLibFunc(*Call, LF)) {
3522 return TLI.has(LF);
3523 }
3524
3525 return false;
3526}
3527
3528void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3529 LoadInst &NewLI) {
3530 auto *NewTy = NewLI.getType();
3531
3532 // This only directly applies if the new type is also a pointer.
3533 if (NewTy->isPointerTy()) {
3534 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3535 return;
3536 }
3537
3538 // The only other translation we can do is to integral loads with !range
3539 // metadata.
3540 if (!NewTy->isIntegerTy())
3541 return;
3542
3543 MDBuilder MDB(NewLI.getContext());
3544 const Value *Ptr = OldLI.getPointerOperand();
3545 auto *ITy = cast<IntegerType>(NewTy);
3546 auto *NullInt = ConstantExpr::getPtrToInt(
3547 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3548 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3549 NewLI.setMetadata(LLVMContext::MD_range,
3550 MDB.createRange(NonNullInt, NullInt));
3551}
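// --- Editorial illustration, not part of Local.cpp ----------------------
// Sketch of the !nonnull -> !range translation above, assuming a 64-bit
// target whose null pointer is bitwise zero:
//
//   %p = load ptr, ptr %slot, !nonnull !0
//     ; the load re-typed to i64 receives
//   %v = load i64, ptr %slot, !range !{i64 1, i64 0}
//
// The wrapped range [1, 0) means "any value except 0", which is exactly the
// guarantee !nonnull gave for the pointer-typed load.
// ------------------------------------------------------------------------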
3552
3553void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3554 MDNode *N, LoadInst &NewLI) {
3555 auto *NewTy = NewLI.getType();
3556 // Simply copy the metadata if the type did not change.
3557 if (NewTy == OldLI.getType()) {
3558 NewLI.setMetadata(LLVMContext::MD_range, N);
3559 return;
3560 }
3561
3562 // Give up unless it is converted to a pointer where there is a single very
3563 // valuable mapping we can do reliably.
3564 // FIXME: It would be nice to propagate this in more ways, but the type
3565 // conversions make it hard.
3566 if (!NewTy->isPointerTy())
3567 return;
3568
3569 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3570 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3571 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3572 MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
3573 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3574 }
3575}
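// --- Editorial illustration, not part of Local.cpp ----------------------
// The inverse direction of copyNonnullMetadata(), sketched for the one case
// handled above (an integer load re-typed as a same-width pointer load whose
// !range excludes zero):
//
//   %v = load i64, ptr %slot, !range !{i64 1, i64 0}
//     ; the load re-typed to ptr receives
//   %p = load ptr, ptr %slot, !nonnull !0
//
// If the range still contains zero, or the bit widths differ, the metadata
// is dropped rather than translated.
// ------------------------------------------------------------------------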
3576
3577void llvm::dropDebugUsers(Instruction &I) {
3578 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3579 SmallVector<DbgVariableRecord *, 1> DPUsers;
3580 findDbgUsers(DbgUsers, &I, &DPUsers);
3581 for (auto *DII : DbgUsers)
3582 DII->eraseFromParent();
3583 for (auto *DVR : DPUsers)
3584 DVR->eraseFromParent();
3585}
3586
3587void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3588 BasicBlock *BB) {
3589 // Since we are moving the instructions out of its basic block, we do not
3590 // retain their original debug locations (DILocations) and debug intrinsic
3591 // instructions.
3592 //
3593 // Doing so would degrade the debugging experience and adversely affect the
3594 // accuracy of profiling information.
3595 //
3596 // Currently, when hoisting the instructions, we take the following actions:
3597 // - Remove their debug intrinsic instructions.
3598 // - Set their debug locations to the values from the insertion point.
3599 //
3600 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3601 // need to be deleted, is because there will not be any instructions with a
3602 // DILocation in either branch left after performing the transformation. We
3603 // can only insert a dbg.value after the two branches are joined again.
3604 //
3605 // See PR38762, PR39243 for more details.
3606 //
3607 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3608 // encode predicated DIExpressions that yield different results on different
3609 // code paths.
3610
3611 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3612 Instruction *I = &*II;
3613 I->dropUBImplyingAttrsAndMetadata();
3614 if (I->isUsedByMetadata())
3615 dropDebugUsers(*I);
3616 // RemoveDIs: drop debug-info too as the following code does.
3617 I->dropDbgRecords();
3618 if (I->isDebugOrPseudoInst()) {
3619 // Remove DbgInfo and pseudo probe Intrinsics.
3620 II = I->eraseFromParent();
3621 continue;
3622 }
3623 I->setDebugLoc(InsertPt->getDebugLoc());
3624 ++II;
3625 }
3626 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3627 BB->getTerminator()->getIterator());
3628}
3629
3630DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3631 Type &Ty) {
3632 // Create integer constant expression.
3633 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3634 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3635 std::optional<int64_t> InitIntOpt = API.trySExtValue();
3636 return InitIntOpt ? DIB.createConstantValueExpression(
3637 static_cast<uint64_t>(*InitIntOpt))
3638 : nullptr;
3639 };
3640
3641 if (isa<ConstantInt>(C))
3642 return createIntegerExpression(C);
3643
3644 auto *FP = dyn_cast<ConstantFP>(&C);
3645 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3646 const APFloat &APF = FP->getValueAPF();
3647 APInt const &API = APF.bitcastToAPInt();
3648 if (auto Temp = API.getZExtValue())
3649 return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
3650 return DIB.createConstantValueExpression(*API.getRawData());
3651 }
3652
3653 if (!Ty.isPointerTy())
3654 return nullptr;
3655
3656 if (isa<ConstantPointerNull>(C))
3657 return DIB.createConstantValueExpression(0);
3658
3659 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3660 if (CE->getOpcode() == Instruction::IntToPtr) {
3661 const Value *V = CE->getOperand(0);
3662 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3663 return createIntegerExpression(*CI);
3664 }
3665 return nullptr;
3666}
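// --- Editorial illustration, not part of Local.cpp ----------------------
// Hedged sketch of the output: for an i32 constant 42 the routine returns
// roughly
//
//   !DIExpression(DW_OP_constu, 42, DW_OP_stack_value)
//
// (built by DIBuilder::createConstantValueExpression). Small floats are
// encoded from their raw bit pattern, a null pointer becomes the constant 0,
// an inttoptr of a constant integer reuses that integer, and everything else
// yields nullptr.
// ------------------------------------------------------------------------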
3667
3668namespace {
3669
3670/// A potential constituent of a bitreverse or bswap expression. See
3671/// collectBitParts for a fuller explanation.
3672struct BitPart {
3673 BitPart(Value *P, unsigned BW) : Provider(P) {
3674 Provenance.resize(BW);
3675 }
3676
3677 /// The Value that this is a bitreverse/bswap of.
3678 Value *Provider;
3679
3680 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3681 /// in Provider becomes bit B in the result of this expression.
3682 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3683
3684 enum { Unset = -1 };
3685};
3686
3687} // end anonymous namespace
3688
3689/// Analyze the specified subexpression and see if it is capable of providing
3690/// pieces of a bswap or bitreverse. The subexpression provides a potential
3691/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3692/// the output of the expression came from a corresponding bit in some other
3693/// value. This function is recursive, and the end result is a mapping of
3694/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3695/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3696///
3697/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3698/// that the expression deposits the low byte of %X into the high byte of the
3699/// result and that all other bits are zero. This expression is accepted and a
3700/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3701/// [0-7].
3702///
3703/// For vector types, all analysis is performed at the per-element level. No
3704/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3705/// constant masks must be splatted across all elements.
3706///
3707/// To avoid revisiting values, the BitPart results are memoized into the
3708/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3709/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3710/// store BitParts objects, not pointers. As we need the concept of a nullptr
3711/// BitParts (Value has been analyzed and the analysis failed), we use an
3712/// Optional type instead to provide the same functionality.
3713///
3714/// Because we pass around references into \c BPS, we must use a container that
3715/// does not invalidate internal references (std::map instead of DenseMap).
3716static const std::optional<BitPart> &
3717collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3718 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3719 bool &FoundRoot) {
3720 auto I = BPS.find(V);
3721 if (I != BPS.end())
3722 return I->second;
3723
3724 auto &Result = BPS[V] = std::nullopt;
3725 auto BitWidth = V->getType()->getScalarSizeInBits();
3726
3727 // Can't do integer/elements > 128 bits.
3728 if (BitWidth > 128)
3729 return Result;
3730
3731 // Prevent stack overflow by limiting the recursion depth
3732 if (Depth == BitPartRecursionMaxDepth) {
3733 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3734 return Result;
3735 }
3736
3737 if (auto *I = dyn_cast<Instruction>(V)) {
3738 Value *X, *Y;
3739 const APInt *C;
3740
3741 // If this is an or instruction, it may be an inner node of the bswap.
3742 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3743 // Check we have both sources and they are from the same provider.
3744 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3745 Depth + 1, FoundRoot);
3746 if (!A || !A->Provider)
3747 return Result;
3748
3749 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3750 Depth + 1, FoundRoot);
3751 if (!B || A->Provider != B->Provider)
3752 return Result;
3753
3754 // Try and merge the two together.
3755 Result = BitPart(A->Provider, BitWidth);
3756 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3757 if (A->Provenance[BitIdx] != BitPart::Unset &&
3758 B->Provenance[BitIdx] != BitPart::Unset &&
3759 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3760 return Result = std::nullopt;
3761
3762 if (A->Provenance[BitIdx] == BitPart::Unset)
3763 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3764 else
3765 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3766 }
3767
3768 return Result;
3769 }
3770
3771 // If this is a logical shift by a constant, recurse then shift the result.
3772 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3773 const APInt &BitShift = *C;
3774
3775 // Ensure the shift amount is defined.
3776 if (BitShift.uge(BitWidth))
3777 return Result;
3778
3779 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3780 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3781 return Result;
3782
3783 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3784 Depth + 1, FoundRoot);
3785 if (!Res)
3786 return Result;
3787 Result = Res;
3788
3789 // Perform the "shift" on BitProvenance.
3790 auto &P = Result->Provenance;
3791 if (I->getOpcode() == Instruction::Shl) {
3792 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3793 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3794 } else {
3795 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3796 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3797 }
3798
3799 return Result;
3800 }
3801
3802 // If this is a logical 'and' with a mask that clears bits, recurse then
3803 // unset the appropriate bits.
3804 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3805 const APInt &AndMask = *C;
3806
3807 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3808 // early exit.
3809 unsigned NumMaskedBits = AndMask.popcount();
3810 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3811 return Result;
3812
3813 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3814 Depth + 1, FoundRoot);
3815 if (!Res)
3816 return Result;
3817 Result = Res;
3818
3819 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3820 // If the AndMask is zero for this bit, clear the bit.
3821 if (AndMask[BitIdx] == 0)
3822 Result->Provenance[BitIdx] = BitPart::Unset;
3823 return Result;
3824 }
3825
3826 // If this is a zext instruction zero extend the result.
3827 if (match(V, m_ZExt(m_Value(X)))) {
3828 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3829 Depth + 1, FoundRoot);
3830 if (!Res)
3831 return Result;
3832
3833 Result = BitPart(Res->Provider, BitWidth);
3834 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3835 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3836 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3837 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3838 Result->Provenance[BitIdx] = BitPart::Unset;
3839 return Result;
3840 }
3841
3842 // If this is a truncate instruction, extract the lower bits.
3843 if (match(V, m_Trunc(m_Value(X)))) {
3844 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3845 Depth + 1, FoundRoot);
3846 if (!Res)
3847 return Result;
3848
3849 Result = BitPart(Res->Provider, BitWidth);
3850 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3851 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3852 return Result;
3853 }
3854
3855 // BITREVERSE - most likely due to us previously matching a partial
3856 // bitreverse.
3857 if (match(V, m_BitReverse(m_Value(X)))) {
3858 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3859 Depth + 1, FoundRoot);
3860 if (!Res)
3861 return Result;
3862
3863 Result = BitPart(Res->Provider, BitWidth);
3864 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3865 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3866 return Result;
3867 }
3868
3869 // BSWAP - most likely due to us previously matching a partial bswap.
3870 if (match(V, m_BSwap(m_Value(X)))) {
3871 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3872 Depth + 1, FoundRoot);
3873 if (!Res)
3874 return Result;
3875
3876 unsigned ByteWidth = BitWidth / 8;
3877 Result = BitPart(Res->Provider, BitWidth);
3878 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3879 unsigned ByteBitOfs = ByteIdx * 8;
3880 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3881 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3882 Res->Provenance[ByteBitOfs + BitIdx];
3883 }
3884 return Result;
3885 }
3886
3887 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3888 // amount (modulo).
3889 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3890 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3891 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3892 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3893 // We can treat fshr as a fshl by flipping the modulo amount.
3894 unsigned ModAmt = C->urem(BitWidth);
3895 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3896 ModAmt = BitWidth - ModAmt;
3897
3898 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3899 if (!MatchBitReversals && (ModAmt % 8) != 0)
3900 return Result;
3901
3902 // Check we have both sources and they are from the same provider.
3903 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3904 Depth + 1, FoundRoot);
3905 if (!LHS || !LHS->Provider)
3906 return Result;
3907
3908 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3909 Depth + 1, FoundRoot);
3910 if (!RHS || LHS->Provider != RHS->Provider)
3911 return Result;
3912
3913 unsigned StartBitRHS = BitWidth - ModAmt;
3914 Result = BitPart(LHS->Provider, BitWidth);
3915 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3916 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3917 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3918 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3919 return Result;
3920 }
3921 }
3922
3923 // If we've already found a root input value then we're never going to merge
3924 // these back together.
3925 if (FoundRoot)
3926 return Result;
3927
3928 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3929 // be the root input value to the bswap/bitreverse.
3930 FoundRoot = true;
3931 Result = BitPart(V, BitWidth);
3932 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3933 Result->Provenance[BitIdx] = BitIdx;
3934 return Result;
3935}
3936
3937static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3938 unsigned BitWidth) {
3939 if (From % 8 != To % 8)
3940 return false;
3941 // Convert from bit indices to byte indices and check for a byte reversal.
3942 From >>= 3;
3943 To >>= 3;
3944 BitWidth >>= 3;
3945 return From == BitWidth - To - 1;
3946}
3947
3948static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3949 unsigned BitWidth) {
3950 return From == BitWidth - To - 1;
3951}
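// --- Editorial illustration, not part of Local.cpp ----------------------
// Worked example of the two predicates above for BitWidth == 32:
//
//   bswap:      destination byte k must come from source byte (4 - 1 - k),
//               so From=24, To=0 passes: 24 % 8 == 0 % 8 and, in bytes,
//               3 == 4 - 0 - 1.
//   bitreverse: destination bit To must come from bit (32 - To - 1), so
//               From=31, To=0 and From=0, To=31 pass, while From=24, To=0
//               does not.
// ------------------------------------------------------------------------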
3952
3953bool llvm::recognizeBSwapOrBitReverseIdiom(
3954 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3955 SmallVectorImpl<Instruction *> &InsertedInsts) {
3956 if (!match(I, m_Or(m_Value(), m_Value())) &&
3957 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3958 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
3959 !match(I, m_BSwap(m_Value())))
3960 return false;
3961 if (!MatchBSwaps && !MatchBitReversals)
3962 return false;
3963 Type *ITy = I->getType();
3964 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3965 return false; // Can't do integer/elements > 128 bits.
3966
3967 // Try to find all the pieces corresponding to the bswap.
3968 bool FoundRoot = false;
3969 std::map<Value *, std::optional<BitPart>> BPS;
3970 const auto &Res =
3971 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3972 if (!Res)
3973 return false;
3974 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3975 assert(all_of(BitProvenance,
3976 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3977 "Illegal bit provenance index");
3978
3979 // If the upper bits are zero, then attempt to perform as a truncated op.
3980 Type *DemandedTy = ITy;
3981 if (BitProvenance.back() == BitPart::Unset) {
3982 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3983 BitProvenance = BitProvenance.drop_back();
3984 if (BitProvenance.empty())
3985 return false; // TODO - handle null value?
3986 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3987 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3988 DemandedTy = VectorType::get(DemandedTy, IVecTy);
3989 }
3990
3991 // Check BitProvenance hasn't found a source larger than the result type.
3992 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3993 if (DemandedBW > ITy->getScalarSizeInBits())
3994 return false;
3995
3996 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3997 // only byteswap values with an even number of bytes.
3998 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3999 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
4000 bool OKForBitReverse = MatchBitReversals;
4001 for (unsigned BitIdx = 0;
4002 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
4003 if (BitProvenance[BitIdx] == BitPart::Unset) {
4004 DemandedMask.clearBit(BitIdx);
4005 continue;
4006 }
4007 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
4008 DemandedBW);
4009 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
4010 BitIdx, DemandedBW);
4011 }
4012
4013 Intrinsic::ID Intrin;
4014 if (OKForBSwap)
4015 Intrin = Intrinsic::bswap;
4016 else if (OKForBitReverse)
4017 Intrin = Intrinsic::bitreverse;
4018 else
4019 return false;
4020
4021 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
4022 Value *Provider = Res->Provider;
4023
4024 // We may need to truncate the provider.
4025 if (DemandedTy != Provider->getType()) {
4026 auto *Trunc =
4027 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
4028 InsertedInsts.push_back(Trunc);
4029 Provider = Trunc;
4030 }
4031
4032 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
4033 InsertedInsts.push_back(Result);
4034
4035 if (!DemandedMask.isAllOnes()) {
4036 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
4037 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
4038 InsertedInsts.push_back(Result);
4039 }
4040
4041 // We may need to zeroextend back to the result type.
4042 if (ITy != Result->getType()) {
4043 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
4044 InsertedInsts.push_back(ExtInst);
4045 }
4046
4047 return true;
4048}
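// --- Editorial illustration, not part of Local.cpp ----------------------
// A classic input this routine recognizes when MatchBSwaps is set: a
// hand-rolled 32-bit byte swap (value names invented for the example):
//
//   %b0 = shl i32 %x, 24
//   %t1 = shl i32 %x, 8
//   %b1 = and i32 %t1, 16711680        ; 0x00FF0000
//   %t2 = lshr i32 %x, 8
//   %b2 = and i32 %t2, 65280           ; 0x0000FF00
//   %b3 = lshr i32 %x, 24
//   %h  = or i32 %b0, %b1
//   %l  = or i32 %b2, %b3
//   %r  = or i32 %h, %l
//
// collectBitParts() proves every bit of %r is a byte-reversed bit of %x, so
// %r is rewritten to `call i32 @llvm.bswap.i32(i32 %x)` and the new call is
// appended to InsertedInsts.
// ------------------------------------------------------------------------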
4049
4050// CodeGen has special handling for some string functions that may replace
4051// them with target-specific intrinsics. Since that'd skip our interceptors
4052// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
4053// we mark affected calls as NoBuiltin, which will disable optimization
4054// in CodeGen.
4055void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
4056 CallInst *CI, const TargetLibraryInfo *TLI) {
4057 Function *F = CI->getCalledFunction();
4058 LibFunc Func;
4059 if (F && !F->hasLocalLinkage() && F->hasName() &&
4060 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
4061 !F->doesNotAccessMemory())
4062 CI->addFnAttr(Attribute::NoBuiltin);
4063}
4064
4065bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
4066 // We can't have a PHI with a metadata type.
4067 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
4068 return false;
4069
4070 // Early exit.
4071 if (!isa<Constant>(I->getOperand(OpIdx)))
4072 return true;
4073
4074 switch (I->getOpcode()) {
4075 default:
4076 return true;
4077 case Instruction::Call:
4078 case Instruction::Invoke: {
4079 const auto &CB = cast<CallBase>(*I);
4080
4081 // Can't handle inline asm. Skip it.
4082 if (CB.isInlineAsm())
4083 return false;
4084
4085 // Constant bundle operands may need to retain their constant-ness for
4086 // correctness.
4087 if (CB.isBundleOperand(OpIdx))
4088 return false;
4089
4090 if (OpIdx < CB.arg_size()) {
4091 // Some variadic intrinsics require constants in the variadic arguments,
4092 // which currently aren't markable as immarg.
4093 if (isa<IntrinsicInst>(CB) &&
4094 OpIdx >= CB.getFunctionType()->getNumParams()) {
4095 // This is known to be OK for stackmap.
4096 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
4097 }
4098
4099 // gcroot is a special case, since it requires a constant argument which
4100 // isn't also required to be a simple ConstantInt.
4101 if (CB.getIntrinsicID() == Intrinsic::gcroot)
4102 return false;
4103
4104 // Some intrinsic operands are required to be immediates.
4105 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
4106 }
4107
4108 // It is never allowed to replace the call argument to an intrinsic, but it
4109 // may be possible for a call.
4110 return !isa<IntrinsicInst>(CB);
4111 }
4112 case Instruction::ShuffleVector:
4113 // Shufflevector masks are constant.
4114 return OpIdx != 2;
4115 case Instruction::Switch:
4116 case Instruction::ExtractValue:
4117 // All operands apart from the first are constant.
4118 return OpIdx == 0;
4119 case Instruction::InsertValue:
4120 // All operands apart from the first and the second are constant.
4121 return OpIdx < 2;
4122 case Instruction::Alloca:
4123 // Static allocas (constant size in the entry block) are handled by
4124 // prologue/epilogue insertion so they're free anyway. We definitely don't
4125 // want to make them non-constant.
4126 return !cast<AllocaInst>(I)->isStaticAlloca();
4127 case Instruction::GetElementPtr:
4128 if (OpIdx == 0)
4129 return true;
4130 gep_type_iterator It = gep_type_begin(I);
4131 for (auto E = std::next(It, OpIdx); It != E; ++It)
4132 if (It.isStruct())
4133 return false;
4134 return true;
4135 }
4136}
4137
4138Value *llvm::invertCondition(Value *Condition) {
4139 // First: Check if it's a constant
4140 if (Constant *C = dyn_cast<Constant>(Condition))
4141 return ConstantExpr::getNot(C);
4142
4143 // Second: If the condition is already inverted, return the original value
4144 Value *NotCondition;
4145 if (match(Condition, m_Not(m_Value(NotCondition))))
4146 return NotCondition;
4147
4148 BasicBlock *Parent = nullptr;
4149 Instruction *Inst = dyn_cast<Instruction>(Condition);
4150 if (Inst)
4151 Parent = Inst->getParent();
4152 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4153 Parent = &Arg->getParent()->getEntryBlock();
4154 assert(Parent && "Unsupported condition to invert");
4155
4156 // Third: Check all the users for an invert
4157 for (User *U : Condition->users())
4158 if (Instruction *I = dyn_cast<Instruction>(U))
4159 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4160 return I;
4161
4162 // Last option: Create a new instruction
4163 auto *Inverted =
4164 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4165 if (Inst && !isa<PHINode>(Inst))
4166 Inverted->insertAfter(Inst);
4167 else
4168 Inverted->insertBefore(&*Parent->getFirstInsertionPt());
4169 return Inverted;
4170}
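// --- Editorial illustration, not part of Local.cpp ----------------------
// invertCondition() tries hard not to create new IR. A hedged sketch of the
// lookup order for an i1 condition %c:
//
//   1. Constant             -> folded through ConstantExpr::getNot.
//   2. %c is `xor %x, true` -> return %x directly.
//   3. Some `%n = xor %c, true` already exists in the same block -> reuse %n.
//   4. Otherwise emit a fresh `%c.inv = xor %c, true` right after %c (or at
//      the entry block's first insertion point for arguments and PHIs).
// ------------------------------------------------------------------------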
4171
4172bool llvm::inferAttributesFromOthers(Function &F) {
4173 // Note: We explicitly check for attributes rather than using cover functions
4174 // because some of the cover functions include the logic being implemented.
4175
4176 bool Changed = false;
4177 // readnone + not convergent implies nosync
4178 if (!F.hasFnAttribute(Attribute::NoSync) &&
4179 F.doesNotAccessMemory() && !F.isConvergent()) {
4180 F.setNoSync();
4181 Changed = true;
4182 }
4183
4184 // readonly implies nofree
4185 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4186 F.setDoesNotFreeMemory();
4187 Changed = true;
4188 }
4189
4190 // willreturn implies mustprogress
4191 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4192 F.setMustProgress();
4193 Changed = true;
4194 }
4195
4196 // TODO: There are a bunch of cases of restrictive memory effects we
4197 // can infer by inspecting arguments of argmemonly-ish functions.
4198
4199 return Changed;
4200}
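// --- Editorial illustration, not part of Local.cpp ----------------------
// Sketch of the three inferences above on a hypothetical declaration:
//
//   declare i32 @pure_helper(i32) memory(none) willreturn
//
// memory(none) plus "not convergent" allows adding nosync, read-only memory
// behaviour allows adding nofree, and willreturn allows adding mustprogress.
// ------------------------------------------------------------------------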
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static unsigned getIntrinsicID(const SDNode *N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefAnalysis InstSet & ToRemove
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
bool End
Definition: ELF_riscv.cpp:480
static unsigned getHashValueImpl(SimpleValue Val)
Definition: EarlyCSE.cpp:235
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition: EarlyCSE.cpp:357
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
Rewrite Partial Register Uses
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
Definition: IVUsers.cpp:48
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static bool markAliveBlocks(Function &F, SmallPtrSetImpl< BasicBlock * > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition: Local.cpp:2962
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition: Local.cpp:1603
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition: Local.cpp:2693
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition: Local.cpp:1879
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition: Local.cpp:2411
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition: Local.cpp:1573
static void gatherIncomingValuesToPhi(PHINode *PN, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition: Local.cpp:961
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition: Local.cpp:2441
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition: Local.cpp:2590
static void insertDbgValueOrDbgVariableRecordAfter(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1672
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2453
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition: Local.cpp:977
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2385
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2512
static unsigned replaceDominatedUsesWith(Value *From, Value *To, const RootType &Root, const DominatesFn &Dominates)
Definition: Local.cpp:3448
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition: Local.cpp:856
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition: Local.cpp:673
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition: Local.cpp:1873
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition: Local.cpp:632
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition: Local.cpp:2487
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3937
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition: Local.cpp:3717
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1355
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition: Local.cpp:865
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition: Local.cpp:1391
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition: Local.cpp:2595
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition: Local.cpp:1653
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition: Local.cpp:936
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgValueInst *DVI, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition: Local.cpp:2152
static void insertDbgVariableRecordsForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Definition: Local.cpp:2004
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, const SmallPtrSetImpl< BasicBlock * > &SuccPreds, BasicBlock *&CommonPred)
Definition: Local.cpp:1018
static const unsigned BitPartRecursionMaxDepth
Definition: Local.cpp:117
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition: Local.cpp:1051
cl::opt< bool > UseNewDbgInfoFormat
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition: Local.cpp:3948
static void salvageDbgAssignAddress(T *Assign)
Definition: Local.cpp:2209
This defines the Use class.
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition: APInt.h:1385
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1491
unsigned popcount() const
Count the number of bits set.
Definition: APInt.h:1620
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:349
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition: APInt.h:547
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition: APInt.h:1525
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1513
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1199
an instruction to allocate memory on the stack
Definition: Instructions.h:59
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:174
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition: ArrayRef.h:210
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
Value handle that asserts if the Value is deleted.
Definition: ValueHandle.h:264
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:443
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:430
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:499
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:409
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition: BasicBlock.h:640
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:367
void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:360
const Instruction & front() const
Definition: BasicBlock.h:453
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:199
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:564
void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
Definition: BasicBlock.cpp:284
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:452
void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
Definition: BasicBlock.cpp:712
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:276
void insertDbgRecordAfter(DbgRecord *DR, Instruction *I)
Insert a DbgRecord into a block at the position given by I.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:379
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:165
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition: BasicBlock.cpp:168
size_t size() const
Definition: BasicBlock.h:451
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:221
bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
Definition: BasicBlock.cpp:478
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition: BasicBlock.h:613
const Instruction & back() const
Definition: BasicBlock.h:455
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:509
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
BinaryOps getOpcode() const
Definition: InstrTypes.h:513
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition: Constants.h:889
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1846
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1804
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1851
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1800
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1823
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1678
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB, BasicBlock::iterator InsertBefore)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:1022
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:1023
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:1017
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:1020
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:1018
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:1021
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
bool isSigned() const
Definition: InstrTypes.h:1265
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1105
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1017
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2126
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2529
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2112
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2535
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1775
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
This is an important base class in LLVM.
Definition: Constant.h:41
void destroyConstant()
Called if some element of this constant is no longer valid.
Definition: Constants.cpp:472
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition: DIBuilder.h:779
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
uint64_t getElement(unsigned I) const
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
unsigned getNumVariableLocationOps() const
bool isAddressOfVariable() const
Does this describe the address of a local variable.
std::optional< uint64_t > getFragmentSizeInBits() const
Get the size (in bits) of the variable, or fragment of the variable that is described.
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
bool isAddressOfVariable() const
Does this describe the address of a local variable.
std::optional< uint64_t > getFragmentSizeInBits() const
Get the size (in bits) of the variable, or fragment of the variable that is described.
DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
unsigned getNumVariableLocationOps() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
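A hedged illustration of the DbgVariableRecord accessors above (placeholder names, not code from Local.cpp): retarget a record's location operands from one value to another.
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DebugProgramInstruction.h"
using namespace llvm;
// If DVR currently refers to Old in any of its location operands, make those
// operands refer to New. replaceVariableLocationOp expects Old to be a
// current location, so check first.
static void retargetDbgRecord(DbgVariableRecord &DVR, Value *Old, Value *New) {
  if (llvm::is_contained(DVR.location_ops(), Old))
    DVR.replaceVariableLocationOp(Old, New);
}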
A debug info location.
Definition: DebugLoc.h:33
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:20
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
unsigned size() const
Definition: DenseMap.h:99
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
void recalculate(Function &F)
Notify DTU that the entry block was replaced.
bool hasDomTree() const
Returns true if it holds a DominatorTree.
bool isBBPendingDeletion(BasicBlock *DelBB) const
Returns true if DelBB is awaiting deletion.
void applyUpdates(ArrayRef< DominatorTree::UpdateType > Updates)
Submit updates to all available trees.
void deleteBB(BasicBlock *DelBB)
Delete DelBB.
void applyUpdatesPermissive(ArrayRef< DominatorTree::UpdateType > Updates)
Submit updates to all available trees.
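A minimal sketch of the DomTreeUpdater interface above, assuming the caller has just retargeted the terminator of BB from OldSucc to NewSucc (all names are placeholders):
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;
// Tell every tree the updater holds about the deleted and inserted CFG edges.
// DTU may be null when no tree needs to be kept up to date.
static void notifyEdgeRetarget(DomTreeUpdater *DTU, BasicBlock *BB,
                               BasicBlock *OldSucc, BasicBlock *NewSucc) {
  if (!DTU)
    return;
  DTU->applyUpdates({{DominatorTree::Delete, BB, OldSucc},
                     {DominatorTree::Insert, BB, NewSucc}});
}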
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
const BasicBlock & getEntryBlock() const
Definition: Function.h:783
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2241
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition: IRBuilder.h:1120
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1114
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2666
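A minimal sketch of the IRBuilder calls listed above (placeholder names, not this file's logic): compare a value against zero and branch on the result. V is assumed to have an integer or pointer type so a null constant exists.
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Emit "%is.zero = icmp eq V, 0" and "br %is.zero, TrueBB, FalseBB"
// immediately before InsertPt.
static void emitIsZeroBranch(Instruction *InsertPt, Value *V,
                             BasicBlock *TrueBB, BasicBlock *FalseBB) {
  IRBuilder<> Builder(InsertPt);
  Value *IsZero =
      Builder.CreateICmpEQ(V, Constant::getNullValue(V->getType()), "is.zero");
  Builder.CreateCondBr(IsZero, TrueBB, FalseBB);
}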
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
Definition: Instruction.cpp:83
bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
Definition: Metadata.cpp:1745
void moveAfter(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:812
const BasicBlock * getParent() const
Definition: Instruction.h:152
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
bool isIdenticalToWhenDefined(const Instruction *I) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:359
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1636
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:451
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
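A hedged sketch combining several of the Instruction members above (placeholder helper, not the exact code in this file): let New stand in for Old while preserving its debug location and a whitelist of metadata kinds.
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;
// New takes over Old's role: carry over the debug location and selected
// metadata kinds, redirect all uses, then delete Old.
static void replaceAndCleanUp(Instruction *Old, Instruction *New) {
  New->setDebugLoc(Old->getDebugLoc());
  New->copyMetadata(*Old, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
  Old->replaceAllUsesWith(New);
  Old->eraseFromParent();
}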
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Invoke instruction.
BasicBlock * getUnwindDest() const
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
BasicBlock * getNormalDest() const
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:184
Value * getPointerOperand()
Definition: Instructions.h:280
MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition: MDBuilder.cpp:94
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
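A minimal sketch using MDBuilder::createBranchWeights together with Instruction::setMetadata from this index; the 4:1 weights and helper name are made up for illustration.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
using namespace llvm;
// Attach !prof branch-weight metadata marking the true edge as the likely one.
static void tagLikelyTaken(BranchInst *BI) {
  MDBuilder MDB(BI->getContext());
  BI->setMetadata(LLVMContext::MD_prof,
                  MDB.createBranchWeights(/*TrueWeight=*/4, /*FalseWeight=*/1));
}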
Metadata node.
Definition: Metadata.h:1067
static MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1135
static MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1541
static MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
Definition: Metadata.cpp:1217
static MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1167
static MDNode * getMostGenericRange(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1279
static MDNode * intersect(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1122
static MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
Definition: Metadata.cpp:1351
static MDNode * combine(LLVMContext &Ctx, const MMRAMetadata &A, const MMRAMetadata &B)
Combines A and B according to MMRA semantics.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
iterator end()
Definition: MapVector.h:71
iterator find(const KeyT &Key)
Definition: MapVector.h:167
bool empty() const
Definition: MapVector.h:79
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: MapVector.h:141
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition: Metadata.h:62
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
const_block_iterator block_begin() const
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
const_block_iterator block_end() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
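A minimal sketch exercising the PHINode accessors above (placeholder helper): test whether every incoming value of PN is the same value V.
#include "llvm/IR/Instructions.h"
using namespace llvm;
// Returns true only when PN has at least one incoming edge and each incoming
// value is exactly V.
static bool allIncomingAre(const PHINode &PN, const Value *V) {
  unsigned N = PN.getNumIncomingValues();
  for (unsigned I = 0; I != N; ++I)
    if (PN.getIncomingValue(I) != V)
      return false;
  return N != 0;
}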
T get() const
Returns the value of the specified pointer type.
Definition: PointerUnion.h:155
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1827
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition: SetVector.h:87
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:264
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
value_type pop_back_val()
Definition: SetVector.h:285
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
size_type size() const
Definition: SmallSet.h:161
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:818
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Definition: TinyPtrVector.h:29
bool empty() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:330
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:249
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:243
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1808
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
value_op_iterator value_op_end()
Definition: User.h:263
Value * getOperand(unsigned i) const
Definition: User.h:169
value_op_iterator value_op_begin()
Definition: User.h:260
Value wrapper in the Metadata hierarchy.
Definition: Metadata.h:450
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:495
iterator find(const KeyT &Val)
Definition: ValueMap.h:155
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: ValueMap.h:172
size_type size() const
Definition: ValueMap.h:140
iterator end()
Definition: ValueMap.h:135
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition: Value.cpp:542
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition: Value.h:806
user_iterator_impl< User > user_iterator
Definition: Value.h:390
bool hasName() const
Definition: Value.h:261
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least NumEntries items before resizing again.
Definition: DenseSet.h:90
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1465
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:836
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
Definition: PatternMatch.h:783
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:573
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
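A minimal sketch of the PatternMatch helpers above (placeholder function, not a matcher used by Local.cpp): peel a bitwise 'not' and return its operand.
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;
// If V is "xor X, -1" in either operand order, return X; otherwise nullptr.
static Value *getNotOperand(Value *V) {
  Value *X;
  if (match(V, m_Not(m_Value(X))))
    return X;
  return nullptr;
}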
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition: Dwarf.h:146
@ ebStrict
This corresponds to "fpexcept.strict".
Definition: FPEnv.h:41
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
pred_iterator pred_end(BasicBlock *BB)
Definition: CFG.h:114
@ Offset
Definition: DWP.cpp:456
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1742
bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1715
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:540
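A hedged usage sketch (placeholder names; assumes I has no side effects): once all uses of I are redirected to a constant, the call below deletes I and any operands that became trivially dead.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static void replaceWithConstantAndClean(Instruction *I, Constant *C,
                                        const TargetLibraryInfo *TLI) {
  I->replaceAllUsesWith(C);                           // I is now unused
  RecursivelyDeleteTriviallyDeadInstructions(I, TLI); // delete I and dead operands
}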
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2919
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:130
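A minimal driver sketch (not the real pass pipeline) that applies ConstantFoldTerminator to every block in a function, deleting dead conditions as it goes:
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static bool foldAllTerminators(Function &F, const TargetLibraryInfo *TLI,
                               DomTreeUpdater *DTU) {
  bool Changed = false;
  // make_early_inc_range lets the body mutate the block list safely.
  for (BasicBlock &BB : make_early_inc_range(F))
    Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true, TLI,
                                      DTU);
  return Changed;
}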
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
Definition: DebugInfo.cpp:47
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2801
unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition: Local.cpp:3466
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2242
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:148
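A minimal sketch of findDbgUsers (placeholder helper): gather both the intrinsic and DbgVariableRecord debug users of a value and report how many there are.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;
static unsigned countDebugUsers(Value *V) {
  SmallVector<DbgVariableIntrinsic *, 4> DbgUsers;
  SmallVector<DbgVariableRecord *, 4> DbgRecords;
  findDbgUsers(DbgUsers, V, &DbgRecords);
  return DbgUsers.size() + DbgRecords.size();
}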
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1650
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2899
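A hedged sketch combining canSimplifyInvokeNoUnwind and changeToCall from this index (placeholder helper, not this file's exact logic): downgrade an invoke whose callee provably cannot unwind.
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
// If the direct callee never unwinds, replace the invoke with a plain call
// followed by a branch to the normal destination.
static bool simplifyNoUnwindInvoke(InvokeInst *II, DomTreeUpdater *DTU) {
  Function *Callee = II->getCalledFunction();
  if (!Callee || !canSimplifyInvokeNoUnwind(Callee))
    return false;
  changeToCall(II, DTU);
  return true;
}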
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source inst...
Definition: Local.cpp:3363
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or fewer items.
Definition: STLExtras.h:2511
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition: Local.cpp:731
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume don't contain any valuable informat...
void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
Definition: DebugInfo.cpp:141
void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition: Local.cpp:2068
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2783
bool canSimplifyInvokeNoUnwind(const Function *F)
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:400
pred_iterator pred_begin(BasicBlock *BB)
Definition: CFG.h:110
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition: Local.cpp:1120
bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition: Local.cpp:3953
MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1543
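A minimal sketch of getOrEnforceKnownAlignment (placeholder helper; the 16-byte preference is made up): strengthen a load's stated alignment when the utility can prove, or enforce, a larger one.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static void raiseLoadAlignment(LoadInst &LI, AssumptionCache *AC,
                               const DominatorTree *DT) {
  const DataLayout &DL = LI.getModule()->getDataLayout();
  Align Known = getOrEnforceKnownAlignment(LI.getPointerOperand(), Align(16),
                                           DL, &LI, AC, DT);
  if (Known > LI.getAlign())
    LI.setAlignment(Known);
}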
bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than whe...
Definition: Local.cpp:407
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1916
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2043
DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition: Local.cpp:3630
CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information,...
Definition: Local.cpp:2873
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition: Local.cpp:3160
bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition: Local.cpp:419
void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition: Local.cpp:3415
DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
Definition: DebugInfo.cpp:161
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------===// Dbg Intrinsic utilities
Definition: Local.cpp:1691
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition: Local.cpp:3481
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3235
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2833
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2711
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition: Local.cpp:2541
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:3341
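A hedged CSE-style sketch using combineMetadataForCSE (placeholder helper, not GVN or EarlyCSE itself): keep instruction K, fold the duplicate J into it, and merge metadata and poison-generating flags so K remains valid at J's former uses.
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static void foldDuplicateInstruction(Instruction *K, Instruction *J) {
  combineMetadataForCSE(K, J, /*DoesKMove=*/true);
  K->andIRFlags(J); // intersect nuw/nsw/exact/fast-math flags
  J->replaceAllUsesWith(K);
  J->eraseFromParent();
}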
void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition: Local.cpp:3577
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!...
Definition: Local.cpp:771
bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition: Local.cpp:617
void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructio...
Definition: Local.cpp:3587
void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition: Local.cpp:3553
void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition: Local.cpp:3528
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition: Local.cpp:4065
void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple llvm.dbg.value instructions when the alloca it describes is replaced with a new val...
Definition: Local.cpp:2181
Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to...
Definition: Local.cpp:1495
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
Definition: Local.cpp:555
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1921
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
gep_type_iterator gep_type_begin(const User *GEP)
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
Definition: DebugInfo.cpp:66
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use no...
Definition: Local.cpp:651
bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize t...
Definition: Local.cpp:4172
Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition: Local.cpp:4138
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition: Hashing.h:613
void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks; they must have no predecessors that are not themselves being deleted.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition: Local.cpp:4055
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that cannot be reached from the function's entry.
Definition: Local.cpp:3198
bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition: Local.cpp:1487
bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition: Local.cpp:3499
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:491
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces llvm.dbg.declare instruction when the address it describes is replaced with a new value.
Definition: Local.cpp:2132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
#define NDEBUG
Definition: regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:50
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:238
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117