LLVM 23.0.0git
Local.cpp
Go to the documentation of this file.
1//===- Local.cpp - Functions to perform local transformations -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This family of functions perform various local transformations to the
10// program.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Hashing.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constant.h"
41#include "llvm/IR/Constants.h"
42#include "llvm/IR/DIBuilder.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DebugLoc.h"
48#include "llvm/IR/Dominators.h"
50#include "llvm/IR/Function.h"
52#include "llvm/IR/IRBuilder.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
57#include "llvm/IR/Intrinsics.h"
58#include "llvm/IR/IntrinsicsWebAssembly.h"
59#include "llvm/IR/LLVMContext.h"
60#include "llvm/IR/MDBuilder.h"
62#include "llvm/IR/Metadata.h"
63#include "llvm/IR/Module.h"
66#include "llvm/IR/Type.h"
67#include "llvm/IR/Use.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
70#include "llvm/IR/ValueHandle.h"
74#include "llvm/Support/Debug.h"
80#include <algorithm>
81#include <cassert>
82#include <cstdint>
83#include <iterator>
84#include <map>
85#include <optional>
86#include <utility>
87
88using namespace llvm;
89using namespace llvm::PatternMatch;
90
91#define DEBUG_TYPE "local"
92
93STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95
97 "phicse-debug-hash",
98#ifdef EXPENSIVE_CHECKS
99 cl::init(true),
100#else
101 cl::init(false),
102#endif
104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105 "function is well-behaved w.r.t. its isEqual predicate"));
106
108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109 cl::desc(
110 "When the basic block contains not more than this number of PHI nodes, "
111 "perform a (faster!) exhaustive search instead of set-driven one."));
112
114 "max-phi-entries-increase-after-removing-empty-block", cl::init(1000),
116 cl::desc("Stop removing an empty block if removing it will introduce more "
117 "than this number of phi entries in its successor"));
118
119// Max recursion depth for collectBitParts used when detecting bswap and
120// bitreverse idioms.
121static const unsigned BitPartRecursionMaxDepth = 48;
122
123//===----------------------------------------------------------------------===//
124// Local constant propagation.
125//
126
127/// ConstantFoldTerminator - If a terminator instruction is predicated on a
128/// constant value, convert it into an unconditional branch to the constant
129/// destination. This is a nontrivial operation because the successors of this
130/// basic block must have their PHI nodes updated.
131/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
132/// conditions and indirectbr addresses this might make dead if
133/// DeleteDeadConditions is true.
134bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
135 const TargetLibraryInfo *TLI,
136 DomTreeUpdater *DTU) {
137 Instruction *T = BB->getTerminator();
138 IRBuilder<> Builder(T);
139
140 // Branch - See if we are conditional jumping on constant
141 if (auto *BI = dyn_cast<BranchInst>(T)) {
142 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
143
144 BasicBlock *Dest1 = BI->getSuccessor(0);
145 BasicBlock *Dest2 = BI->getSuccessor(1);
146
147 if (Dest2 == Dest1) { // Conditional branch to same location?
148 // This branch matches something like this:
149 // br bool %cond, label %Dest, label %Dest
150 // and changes it into: br label %Dest
151
152 // Let the basic block know that we are letting go of one copy of it.
153 assert(BI->getParent() && "Terminator not inserted in block!");
154 Dest1->removePredecessor(BI->getParent());
155
156 // Replace the conditional branch with an unconditional one.
157 BranchInst *NewBI = Builder.CreateBr(Dest1);
158
159 // Transfer the metadata to the new branch instruction.
160 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
161 LLVMContext::MD_annotation});
162
163 Value *Cond = BI->getCondition();
164 BI->eraseFromParent();
165 if (DeleteDeadConditions)
167 return true;
168 }
169
170 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
171 // Are we branching on constant?
172 // YES. Change to unconditional branch...
173 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
174 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
175
176 // Let the basic block know that we are letting go of it. Based on this,
177 // it will adjust it's PHI nodes.
178 OldDest->removePredecessor(BB);
179
180 // Replace the conditional branch with an unconditional one.
181 BranchInst *NewBI = Builder.CreateBr(Destination);
182
183 // Transfer the metadata to the new branch instruction.
184 NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
185 LLVMContext::MD_annotation});
186
187 BI->eraseFromParent();
188 if (DTU)
189 DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
190 return true;
191 }
192
193 return false;
194 }
195
196 if (auto *SI = dyn_cast<SwitchInst>(T)) {
197 // If we are switching on a constant, we can convert the switch to an
198 // unconditional branch.
199 auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
200 BasicBlock *DefaultDest = SI->getDefaultDest();
201 BasicBlock *TheOnlyDest = DefaultDest;
202
203 // If the default is unreachable, ignore it when searching for TheOnlyDest.
204 if (SI->defaultDestUnreachable() && SI->getNumCases() > 0)
205 TheOnlyDest = SI->case_begin()->getCaseSuccessor();
206
207 bool Changed = false;
208
209 // Figure out which case it goes to.
210 for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
211 // Found case matching a constant operand?
212 if (It->getCaseValue() == CI) {
213 TheOnlyDest = It->getCaseSuccessor();
214 break;
215 }
216
217 // Check to see if this branch is going to the same place as the default
218 // dest. If so, eliminate it as an explicit compare.
219 if (It->getCaseSuccessor() == DefaultDest) {
221 unsigned NCases = SI->getNumCases();
222 // Fold the case metadata into the default if there will be any branches
223 // left, unless the metadata doesn't match the switch.
224 if (NCases > 1 && MD) {
225 // Collect branch weights into a vector.
227 extractBranchWeights(MD, Weights);
228
229 // Merge weight of this case to the default weight.
230 unsigned Idx = It->getCaseIndex();
231 // TODO: Add overflow check.
232 Weights[0] += Weights[Idx + 1];
233 // Remove weight for this case.
234 std::swap(Weights[Idx + 1], Weights.back());
235 Weights.pop_back();
237 }
238 // Remove this entry.
239 BasicBlock *ParentBB = SI->getParent();
240 DefaultDest->removePredecessor(ParentBB);
241 It = SI->removeCase(It);
242 End = SI->case_end();
243
244 // Removing this case may have made the condition constant. In that
245 // case, update CI and restart iteration through the cases.
246 if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
247 CI = NewCI;
248 It = SI->case_begin();
249 }
250
251 Changed = true;
252 continue;
253 }
254
255 // Otherwise, check to see if the switch only branches to one destination.
256 // We do this by reseting "TheOnlyDest" to null when we find two non-equal
257 // destinations.
258 if (It->getCaseSuccessor() != TheOnlyDest)
259 TheOnlyDest = nullptr;
260
261 // Increment this iterator as we haven't removed the case.
262 ++It;
263 }
264
265 if (CI && !TheOnlyDest) {
266 // Branching on a constant, but not any of the cases, go to the default
267 // successor.
268 TheOnlyDest = SI->getDefaultDest();
269 }
270
271 // If we found a single destination that we can fold the switch into, do so
272 // now.
273 if (TheOnlyDest) {
274 // Insert the new branch.
275 Builder.CreateBr(TheOnlyDest);
276 BasicBlock *BB = SI->getParent();
277
278 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
279
280 // Remove entries from PHI nodes which we no longer branch to...
281 BasicBlock *SuccToKeep = TheOnlyDest;
282 for (BasicBlock *Succ : successors(SI)) {
283 if (DTU && Succ != TheOnlyDest)
284 RemovedSuccessors.insert(Succ);
285 // Found case matching a constant operand?
286 if (Succ == SuccToKeep) {
287 SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
288 } else {
289 Succ->removePredecessor(BB);
290 }
291 }
292
293 // Delete the old switch.
294 Value *Cond = SI->getCondition();
295 SI->eraseFromParent();
296 if (DeleteDeadConditions)
298 if (DTU) {
299 std::vector<DominatorTree::UpdateType> Updates;
300 Updates.reserve(RemovedSuccessors.size());
301 for (auto *RemovedSuccessor : RemovedSuccessors)
302 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
303 DTU->applyUpdates(Updates);
304 }
305 return true;
306 }
307
308 if (SI->getNumCases() == 1) {
309 // Otherwise, we can fold this switch into a conditional branch
310 // instruction if it has only one non-default destination.
311 auto FirstCase = *SI->case_begin();
312 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
313 FirstCase.getCaseValue(), "cond");
314
315 // Insert the new branch.
316 BranchInst *NewBr = Builder.CreateCondBr(Cond,
317 FirstCase.getCaseSuccessor(),
318 SI->getDefaultDest());
319 SmallVector<uint32_t> Weights;
320 if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
321 uint32_t DefWeight = Weights[0];
322 uint32_t CaseWeight = Weights[1];
323 // The TrueWeight should be the weight for the single case of SI.
324 NewBr->setMetadata(LLVMContext::MD_prof,
325 MDBuilder(BB->getContext())
326 .createBranchWeights(CaseWeight, DefWeight));
327 }
328
329 // Update make.implicit metadata to the newly-created conditional branch.
330 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
331 if (MakeImplicitMD)
332 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
333
334 // Delete the old switch.
335 SI->eraseFromParent();
336 return true;
337 }
338 return Changed;
339 }
340
341 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
342 // indirectbr blockaddress(@F, @BB) -> br label @BB
343 if (auto *BA =
344 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
345 BasicBlock *TheOnlyDest = BA->getBasicBlock();
346 SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
347
348 // Insert the new branch.
349 Builder.CreateBr(TheOnlyDest);
350
351 BasicBlock *SuccToKeep = TheOnlyDest;
352 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
353 BasicBlock *DestBB = IBI->getDestination(i);
354 if (DTU && DestBB != TheOnlyDest)
355 RemovedSuccessors.insert(DestBB);
356 if (IBI->getDestination(i) == SuccToKeep) {
357 SuccToKeep = nullptr;
358 } else {
359 DestBB->removePredecessor(BB);
360 }
361 }
362 Value *Address = IBI->getAddress();
363 IBI->eraseFromParent();
364 if (DeleteDeadConditions)
365 // Delete pointer cast instructions.
367
368 // Also zap the blockaddress constant if there are no users remaining,
369 // otherwise the destination is still marked as having its address taken.
370 if (BA->use_empty())
371 BA->destroyConstant();
372
373 // If we didn't find our destination in the IBI successor list, then we
374 // have undefined behavior. Replace the unconditional branch with an
375 // 'unreachable' instruction.
376 if (SuccToKeep) {
378 new UnreachableInst(BB->getContext(), BB);
379 }
380
381 if (DTU) {
382 std::vector<DominatorTree::UpdateType> Updates;
383 Updates.reserve(RemovedSuccessors.size());
384 for (auto *RemovedSuccessor : RemovedSuccessors)
385 Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
386 DTU->applyUpdates(Updates);
387 }
388 return true;
389 }
390 }
391
392 return false;
393}
394
395//===----------------------------------------------------------------------===//
396// Local dead code elimination.
397//
398
399/// isInstructionTriviallyDead - Return true if the result produced by the
400/// instruction is not used, and the instruction has no side effects.
401///
403 const TargetLibraryInfo *TLI) {
404 if (!I->use_empty())
405 return false;
407}
408
410 Instruction *I, const TargetLibraryInfo *TLI) {
411 // Instructions that are "markers" and have implied meaning on code around
412 // them (without explicit uses), are not dead on unused paths.
414 if (II->getIntrinsicID() == Intrinsic::stacksave ||
415 II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
416 II->isLifetimeStartOrEnd())
417 return false;
419}
420
422 const TargetLibraryInfo *TLI) {
423 if (I->isTerminator())
424 return false;
425
426 // We don't want the landingpad-like instructions removed by anything this
427 // general.
428 if (I->isEHPad())
429 return false;
430
431 if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
432 if (DLI->getLabel())
433 return false;
434 return true;
435 }
436
437 if (auto *CB = dyn_cast<CallBase>(I))
438 if (isRemovableAlloc(CB, TLI))
439 return true;
440
441 if (!I->willReturn()) {
443 if (!II)
444 return false;
445
446 switch (II->getIntrinsicID()) {
447 case Intrinsic::experimental_guard: {
448 // Guards on true are operationally no-ops. In the future we can
449 // consider more sophisticated tradeoffs for guards considering potential
450 // for check widening, but for now we keep things simple.
451 auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
452 return Cond && Cond->isOne();
453 }
454 // TODO: These intrinsics are not safe to remove, because this may remove
455 // a well-defined trap.
456 case Intrinsic::wasm_trunc_signed:
457 case Intrinsic::wasm_trunc_unsigned:
458 case Intrinsic::ptrauth_auth:
459 case Intrinsic::ptrauth_resign:
460 return true;
461 default:
462 return false;
463 }
464 }
465
466 if (!I->mayHaveSideEffects())
467 return true;
468
469 // Special case intrinsics that "may have side effects" but can be deleted
470 // when dead.
472 // Safe to delete llvm.stacksave and launder.invariant.group if dead.
473 if (II->getIntrinsicID() == Intrinsic::stacksave ||
474 II->getIntrinsicID() == Intrinsic::launder_invariant_group)
475 return true;
476
477 // Intrinsics declare sideeffects to prevent them from moving, but they are
478 // nops without users.
479 if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
480 II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
481 return true;
482
483 if (II->isLifetimeStartOrEnd()) {
484 auto *Arg = II->getArgOperand(0);
485 if (isa<PoisonValue>(Arg))
486 return true;
487
488 // If the only uses of the alloca are lifetime intrinsics, then the
489 // intrinsics are dead.
490 return llvm::all_of(Arg->uses(), [](Use &Use) {
491 return isa<LifetimeIntrinsic>(Use.getUser());
492 });
493 }
494
495 // Assumptions are dead if their condition is trivially true.
496 if (II->getIntrinsicID() == Intrinsic::assume &&
498 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
499 return !Cond->isZero();
500
501 return false;
502 }
503
504 if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
505 std::optional<fp::ExceptionBehavior> ExBehavior =
506 FPI->getExceptionBehavior();
507 return *ExBehavior != fp::ebStrict;
508 }
509 }
510
511 if (auto *Call = dyn_cast<CallBase>(I)) {
512 if (Value *FreedOp = getFreedOperand(Call, TLI))
513 if (Constant *C = dyn_cast<Constant>(FreedOp))
514 return C->isNullValue() || isa<UndefValue>(C);
515 if (isMathLibCallNoop(Call, TLI))
516 return true;
517 }
518
519 // Non-volatile atomic loads from constants can be removed.
520 if (auto *LI = dyn_cast<LoadInst>(I))
521 if (auto *GV = dyn_cast<GlobalVariable>(
522 LI->getPointerOperand()->stripPointerCasts()))
523 if (!LI->isVolatile() && GV->isConstant())
524 return true;
525
526 return false;
527}
528
529/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
530/// trivially dead instruction, delete it. If that makes any of its operands
531/// trivially dead, delete them too, recursively. Return true if any
532/// instructions were deleted.
534 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
535 std::function<void(Value *)> AboutToDeleteCallback) {
537 if (!I || !isInstructionTriviallyDead(I, TLI))
538 return false;
539
541 DeadInsts.push_back(I);
542 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
543 AboutToDeleteCallback);
544
545 return true;
546}
547
550 MemorySSAUpdater *MSSAU,
551 std::function<void(Value *)> AboutToDeleteCallback) {
552 unsigned S = 0, E = DeadInsts.size(), Alive = 0;
553 for (; S != E; ++S) {
554 auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
555 if (!I || !isInstructionTriviallyDead(I)) {
556 DeadInsts[S] = nullptr;
557 ++Alive;
558 }
559 }
560 if (Alive == E)
561 return false;
562 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
563 AboutToDeleteCallback);
564 return true;
565}
566
569 MemorySSAUpdater *MSSAU,
570 std::function<void(Value *)> AboutToDeleteCallback) {
571 // Process the dead instruction list until empty.
572 while (!DeadInsts.empty()) {
573 Value *V = DeadInsts.pop_back_val();
575 if (!I)
576 continue;
578 "Live instruction found in dead worklist!");
579 assert(I->use_empty() && "Instructions with uses are not dead.");
580
581 // Don't lose the debug info while deleting the instructions.
583
584 if (AboutToDeleteCallback)
585 AboutToDeleteCallback(I);
586
587 // Null out all of the instruction's operands to see if any operand becomes
588 // dead as we go.
589 for (Use &OpU : I->operands()) {
590 Value *OpV = OpU.get();
591 OpU.set(nullptr);
592
593 if (!OpV->use_empty())
594 continue;
595
596 // If the operand is an instruction that became dead as we nulled out the
597 // operand, and if it is 'trivially' dead, delete it in a future loop
598 // iteration.
599 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
600 if (isInstructionTriviallyDead(OpI, TLI))
601 DeadInsts.push_back(OpI);
602 }
603 if (MSSAU)
604 MSSAU->removeMemoryAccess(I);
605
606 I->eraseFromParent();
607 }
608}
609
612 findDbgUsers(I, DPUsers);
613 for (auto *DVR : DPUsers)
614 DVR->setKillLocation();
615 return !DPUsers.empty();
616}
617
618/// areAllUsesEqual - Check whether the uses of a value are all the same.
619/// This is similar to Instruction::hasOneUse() except this will also return
620/// true when there are no uses or multiple uses that all refer to the same
621/// value.
623 Value::user_iterator UI = I->user_begin();
624 Value::user_iterator UE = I->user_end();
625 if (UI == UE)
626 return true;
627
628 User *TheUse = *UI;
629 for (++UI; UI != UE; ++UI) {
630 if (*UI != TheUse)
631 return false;
632 }
633 return true;
634}
635
636/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
637/// dead PHI node, due to being a def-use chain of single-use nodes that
638/// either forms a cycle or is terminated by a trivially dead instruction,
639/// delete it. If that makes any of its operands trivially dead, delete them
640/// too, recursively. Return true if a change was made.
642 const TargetLibraryInfo *TLI,
643 llvm::MemorySSAUpdater *MSSAU) {
645 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
646 I = cast<Instruction>(*I->user_begin())) {
647 if (I->use_empty())
649
650 // If we find an instruction more than once, we're on a cycle that
651 // won't prove fruitful.
652 if (!Visited.insert(I).second) {
653 // Break the cycle and delete the instruction and its operands.
654 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
656 return true;
657 }
658 }
659 return false;
660}
661
662static bool
665 const DataLayout &DL,
666 const TargetLibraryInfo *TLI) {
667 if (isInstructionTriviallyDead(I, TLI)) {
669
670 // Null out all of the instruction's operands to see if any operand becomes
671 // dead as we go.
672 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
673 Value *OpV = I->getOperand(i);
674 I->setOperand(i, nullptr);
675
676 if (!OpV->use_empty() || I == OpV)
677 continue;
678
679 // If the operand is an instruction that became dead as we nulled out the
680 // operand, and if it is 'trivially' dead, delete it in a future loop
681 // iteration.
682 if (Instruction *OpI = dyn_cast<Instruction>(OpV))
683 if (isInstructionTriviallyDead(OpI, TLI))
684 WorkList.insert(OpI);
685 }
686
687 I->eraseFromParent();
688
689 return true;
690 }
691
692 if (Value *SimpleV = simplifyInstruction(I, DL)) {
693 // Add the users to the worklist. CAREFUL: an instruction can use itself,
694 // in the case of a phi node.
695 for (User *U : I->users()) {
696 if (U != I) {
697 WorkList.insert(cast<Instruction>(U));
698 }
699 }
700
701 // Replace the instruction with its simplified value.
702 bool Changed = false;
703 if (!I->use_empty()) {
704 I->replaceAllUsesWith(SimpleV);
705 Changed = true;
706 }
707 if (isInstructionTriviallyDead(I, TLI)) {
708 I->eraseFromParent();
709 Changed = true;
710 }
711 return Changed;
712 }
713 return false;
714}
715
716/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
717/// simplify any instructions in it and recursively delete dead instructions.
718///
719/// This returns true if it changed the code, note that it can delete
720/// instructions in other blocks as well in this block.
722 const TargetLibraryInfo *TLI) {
723 bool MadeChange = false;
724 const DataLayout &DL = BB->getDataLayout();
725
726#ifndef NDEBUG
727 // In debug builds, ensure that the terminator of the block is never replaced
728 // or deleted by these simplifications. The idea of simplification is that it
729 // cannot introduce new instructions, and there is no way to replace the
730 // terminator of a block without introducing a new instruction.
731 AssertingVH<Instruction> TerminatorVH(&BB->back());
732#endif
733
735 // Iterate over the original function, only adding insts to the worklist
736 // if they actually need to be revisited. This avoids having to pre-init
737 // the worklist with the entire function's worth of instructions.
738 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
739 BI != E;) {
740 assert(!BI->isTerminator());
741 Instruction *I = &*BI;
742 ++BI;
743
744 // We're visiting this instruction now, so make sure it's not in the
745 // worklist from an earlier visit.
746 if (!WorkList.count(I))
747 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
748 }
749
750 while (!WorkList.empty()) {
751 Instruction *I = WorkList.pop_back_val();
752 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
753 }
754 return MadeChange;
755}
756
757//===----------------------------------------------------------------------===//
758// Control Flow Graph Restructuring.
759//
760
762 DomTreeUpdater *DTU) {
763
764 // If BB has single-entry PHI nodes, fold them.
765 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
766 Value *NewVal = PN->getIncomingValue(0);
767 // Replace self referencing PHI with poison, it must be dead.
768 if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
769 PN->replaceAllUsesWith(NewVal);
770 PN->eraseFromParent();
771 }
772
773 BasicBlock *PredBB = DestBB->getSinglePredecessor();
774 assert(PredBB && "Block doesn't have a single predecessor!");
775
776 bool ReplaceEntryBB = PredBB->isEntryBlock();
777
778 // DTU updates: Collect all the edges that enter
779 // PredBB. These dominator edges will be redirected to DestBB.
781
782 if (DTU) {
783 // To avoid processing the same predecessor more than once.
785 Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
786 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
787 // This predecessor of PredBB may already have DestBB as a successor.
788 if (PredOfPredBB != PredBB)
789 if (SeenPreds.insert(PredOfPredBB).second)
790 Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
791 SeenPreds.clear();
792 for (BasicBlock *PredOfPredBB : predecessors(PredBB))
793 if (SeenPreds.insert(PredOfPredBB).second)
794 Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
795 Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
796 }
797
798 // Zap anything that took the address of DestBB. Not doing this will give the
799 // address an invalid value.
800 if (DestBB->hasAddressTaken()) {
801 BlockAddress *BA = BlockAddress::get(DestBB);
802 Constant *Replacement =
803 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
805 BA->getType()));
806 BA->destroyConstant();
807 }
808
809 // Anything that branched to PredBB now branches to DestBB.
810 PredBB->replaceAllUsesWith(DestBB);
811
812 // Splice all the instructions from PredBB to DestBB.
813 PredBB->getTerminator()->eraseFromParent();
814 DestBB->splice(DestBB->begin(), PredBB);
815 new UnreachableInst(PredBB->getContext(), PredBB);
816
817 // If the PredBB is the entry block of the function, move DestBB up to
818 // become the entry block after we erase PredBB.
819 if (ReplaceEntryBB)
820 DestBB->moveAfter(PredBB);
821
822 if (DTU) {
823 assert(PredBB->size() == 1 &&
825 "The successor list of PredBB isn't empty before "
826 "applying corresponding DTU updates.");
827 DTU->applyUpdatesPermissive(Updates);
828 DTU->deleteBB(PredBB);
829 // Recalculation of DomTree is needed when updating a forward DomTree and
830 // the Entry BB is replaced.
831 if (ReplaceEntryBB && DTU->hasDomTree()) {
832 // The entry block was removed and there is no external interface for
833 // the dominator tree to be notified of this change. In this corner-case
834 // we recalculate the entire tree.
835 DTU->recalculate(*(DestBB->getParent()));
836 }
837 }
838
839 else {
840 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
841 }
842}
843
844/// Return true if we can choose one of these values to use in place of the
845/// other. Note that we will always choose the non-undef value to keep.
846static bool CanMergeValues(Value *First, Value *Second) {
847 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
848}
849
850/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
851/// branch to Succ, into Succ.
852///
853/// Assumption: Succ is the single successor for BB.
854static bool
856 const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
857 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
858
859 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
860 << Succ->getName() << "\n");
861 // Shortcut, if there is only a single predecessor it must be BB and merging
862 // is always safe
863 if (Succ->getSinglePredecessor())
864 return true;
865
866 // Look at all the phi nodes in Succ, to see if they present a conflict when
867 // merging these blocks
868 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
869 PHINode *PN = cast<PHINode>(I);
870
871 // If the incoming value from BB is again a PHINode in
872 // BB which has the same incoming value for *PI as PN does, we can
873 // merge the phi nodes and then the blocks can still be merged
875 if (BBPN && BBPN->getParent() == BB) {
876 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
877 BasicBlock *IBB = PN->getIncomingBlock(PI);
878 if (BBPreds.count(IBB) &&
880 PN->getIncomingValue(PI))) {
882 << "Can't fold, phi node " << PN->getName() << " in "
883 << Succ->getName() << " is conflicting with "
884 << BBPN->getName() << " with regard to common predecessor "
885 << IBB->getName() << "\n");
886 return false;
887 }
888 }
889 } else {
890 Value* Val = PN->getIncomingValueForBlock(BB);
891 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
892 // See if the incoming value for the common predecessor is equal to the
893 // one for BB, in which case this phi node will not prevent the merging
894 // of the block.
895 BasicBlock *IBB = PN->getIncomingBlock(PI);
896 if (BBPreds.count(IBB) &&
897 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
898 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
899 << " in " << Succ->getName()
900 << " is conflicting with regard to common "
901 << "predecessor " << IBB->getName() << "\n");
902 return false;
903 }
904 }
905 }
906 }
907
908 return true;
909}
910
913
914/// Determines the value to use as the phi node input for a block.
915///
916/// Select between \p OldVal any value that we know flows from \p BB
917/// to a particular phi on the basis of which one (if either) is not
918/// undef. Update IncomingValues based on the selected value.
919///
920/// \param OldVal The value we are considering selecting.
921/// \param BB The block that the value flows in from.
922/// \param IncomingValues A map from block-to-value for other phi inputs
923/// that we have examined.
924///
925/// \returns the selected value.
927 IncomingValueMap &IncomingValues) {
928 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
929 if (!isa<UndefValue>(OldVal)) {
930 assert((It != IncomingValues.end() &&
931 (!(It->second) || It->second == OldVal)) &&
932 "Expected OldVal to match incoming value from BB!");
933
934 IncomingValues.insert_or_assign(BB, OldVal);
935 return OldVal;
936 }
937
938 if (It != IncomingValues.end() && It->second)
939 return It->second;
940
941 return OldVal;
942}
943
944/// Create a map from block to value for the operands of a
945/// given phi.
946///
947/// This function initializes the map with UndefValue for all predecessors
948/// in BBPreds, and then updates the map with concrete non-undef values
949/// found in the PHI node.
950///
951/// \param PN The phi we are collecting the map for.
952/// \param BBPreds The list of all predecessor blocks to initialize with Undef.
953/// \param IncomingValues [out] The map from block to value for this phi.
955 const PredBlockVector &BBPreds,
956 IncomingValueMap &IncomingValues) {
957 for (BasicBlock *Pred : BBPreds)
958 IncomingValues[Pred] = nullptr;
959
960 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
961 Value *V = PN->getIncomingValue(i);
962 if (isa<UndefValue>(V))
963 continue;
964
965 BasicBlock *BB = PN->getIncomingBlock(i);
966 auto It = IncomingValues.find(BB);
967 if (It != IncomingValues.end())
968 It->second = V;
969 }
970}
971
972/// Replace the incoming undef values to a phi with the values
973/// from a block-to-value map.
974///
975/// \param PN The phi we are replacing the undefs in.
976/// \param IncomingValues A map from block to value.
978 const IncomingValueMap &IncomingValues) {
979 SmallVector<unsigned> TrueUndefOps;
980 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
981 Value *V = PN->getIncomingValue(i);
982
983 if (!isa<UndefValue>(V)) continue;
984
985 BasicBlock *BB = PN->getIncomingBlock(i);
986 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
987 if (It == IncomingValues.end())
988 continue;
989
990 // Keep track of undef/poison incoming values. Those must match, so we fix
991 // them up below if needed.
992 // Note: this is conservatively correct, but we could try harder and group
993 // the undef values per incoming basic block.
994 if (!It->second) {
995 TrueUndefOps.push_back(i);
996 continue;
997 }
998
999 // There is a defined value for this incoming block, so map this undef
1000 // incoming value to the defined value.
1001 PN->setIncomingValue(i, It->second);
1002 }
1003
1004 // If there are both undef and poison values incoming, then convert those
1005 // values to undef. It is invalid to have different values for the same
1006 // incoming block.
1007 unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
1008 return isa<PoisonValue>(PN->getIncomingValue(i));
1009 });
1010 if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
1011 for (unsigned i : TrueUndefOps)
1013 }
1014}
1015
1016// Only when they share a single common predecessor, return true.
1017// Only handles cases when BB can't be merged while its predecessors can be
1018// redirected.
1019static bool
1021 const SmallPtrSetImpl<BasicBlock *> &BBPreds,
1022 BasicBlock *&CommonPred) {
1023
1024 // There must be phis in BB, otherwise BB will be merged into Succ directly
1025 if (BB->phis().empty() || Succ->phis().empty())
1026 return false;
1027
1028 // BB must have predecessors not shared that can be redirected to Succ
1029 if (!BB->hasNPredecessorsOrMore(2))
1030 return false;
1031
1032 if (any_of(BBPreds, [](const BasicBlock *Pred) {
1033 return isa<IndirectBrInst>(Pred->getTerminator());
1034 }))
1035 return false;
1036
1037 // Get the single common predecessor of both BB and Succ. Return false
1038 // when there are more than one common predecessors.
1039 for (BasicBlock *SuccPred : predecessors(Succ)) {
1040 if (BBPreds.count(SuccPred)) {
1041 if (CommonPred)
1042 return false;
1043 CommonPred = SuccPred;
1044 }
1045 }
1046
1047 return true;
1048}
1049
1050/// Check whether removing \p BB will make the phis in its \p Succ have too
1051/// many incoming entries. This function does not check whether \p BB is
1052/// foldable or not.
1054 // If BB only has one predecessor, then removing it will not introduce more
1055 // incoming edges for phis.
1056 if (BB->hasNPredecessors(1))
1057 return false;
1058 unsigned NumPreds = pred_size(BB);
1059 unsigned NumChangedPhi = 0;
1060 for (auto &Phi : Succ->phis()) {
1061 // If the incoming value is a phi and the phi is defined in BB,
1062 // then removing BB will not increase the total phi entries of the ir.
1063 if (auto *IncomingPhi = dyn_cast<PHINode>(Phi.getIncomingValueForBlock(BB)))
1064 if (IncomingPhi->getParent() == BB)
1065 continue;
1066 // Otherwise, we need to add entries to the phi
1067 NumChangedPhi++;
1068 }
1069 // For every phi that needs to be changed, (NumPreds - 1) new entries will be
1070 // added. If the total increase in phi entries exceeds
1071 // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
1072 // introducing too many new phi entries.
1073 return (NumPreds - 1) * NumChangedPhi >
1075}
1076
1077/// Replace a value flowing from a block to a phi with
1078/// potentially multiple instances of that value flowing from the
1079/// block's predecessors to the phi.
1080///
1081/// \param BB The block with the value flowing into the phi.
1082/// \param BBPreds The predecessors of BB.
1083/// \param PN The phi that we are updating.
1084/// \param CommonPred The common predecessor of BB and PN's BasicBlock
1086 const PredBlockVector &BBPreds,
1087 PHINode *PN,
1088 BasicBlock *CommonPred) {
1089 Value *OldVal = PN->removeIncomingValue(BB, false);
1090 assert(OldVal && "No entry in PHI for Pred BB!");
1091
1092 // Map BBPreds to defined values or nullptr (representing undefined values).
1093 IncomingValueMap IncomingValues;
1094
1095 // We are merging two blocks - BB, and the block containing PN - and
1096 // as a result we need to redirect edges from the predecessors of BB
1097 // to go to the block containing PN, and update PN
1098 // accordingly. Since we allow merging blocks in the case where the
1099 // predecessor and successor blocks both share some predecessors,
1100 // and where some of those common predecessors might have undef
1101 // values flowing into PN, we want to rewrite those values to be
1102 // consistent with the non-undef values.
1103
1104 gatherIncomingValuesToPhi(PN, BBPreds, IncomingValues);
1105
1106 // If this incoming value is one of the PHI nodes in BB, the new entries
1107 // in the PHI node are the entries from the old PHI.
1108 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1109 PHINode *OldValPN = cast<PHINode>(OldVal);
1110 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1111 // Note that, since we are merging phi nodes and BB and Succ might
1112 // have common predecessors, we could end up with a phi node with
1113 // identical incoming branches. This will be cleaned up later (and
1114 // will trigger asserts if we try to clean it up now, without also
1115 // simplifying the corresponding conditional branch).
1116 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1117
1118 if (PredBB == CommonPred)
1119 continue;
1120
1121 Value *PredVal = OldValPN->getIncomingValue(i);
1122 Value *Selected =
1123 selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);
1124
1125 // And add a new incoming value for this predecessor for the
1126 // newly retargeted branch.
1127 PN->addIncoming(Selected, PredBB);
1128 }
1129 if (CommonPred)
1130 PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);
1131
1132 } else {
1133 for (BasicBlock *PredBB : BBPreds) {
1134 // Update existing incoming values in PN for this
1135 // predecessor of BB.
1136 if (PredBB == CommonPred)
1137 continue;
1138
1139 Value *Selected =
1140 selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);
1141
1142 // And add a new incoming value for this predecessor for the
1143 // newly retargeted branch.
1144 PN->addIncoming(Selected, PredBB);
1145 }
1146 if (CommonPred)
1147 PN->addIncoming(OldVal, BB);
1148 }
1149
1150 replaceUndefValuesInPhi(PN, IncomingValues);
1151}
1152
1154 DomTreeUpdater *DTU) {
1155 assert(BB != &BB->getParent()->getEntryBlock() &&
1156 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1157
1158 // We can't simplify infinite loops.
1159 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1160 if (BB == Succ)
1161 return false;
1162
1164
1165 // The single common predecessor of BB and Succ when BB cannot be killed
1166 BasicBlock *CommonPred = nullptr;
1167
1168 bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);
1169
1170 // Even if we can not fold BB into Succ, we may be able to redirect the
1171 // predecessors of BB to Succ.
1172 bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
1173 BB, Succ, BBPreds, CommonPred);
1174
1175 if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
1176 return false;
1177
1178 // Check to see if merging these blocks/phis would cause conflicts for any of
1179 // the phi nodes in BB or Succ. If not, we can safely merge.
1180
1181 // Check for cases where Succ has multiple predecessors and a PHI node in BB
1182 // has uses which will not disappear when the PHI nodes are merged. It is
1183 // possible to handle such cases, but difficult: it requires checking whether
1184 // BB dominates Succ, which is non-trivial to calculate in the case where
1185 // Succ has multiple predecessors. Also, it requires checking whether
1186 // constructing the necessary self-referential PHI node doesn't introduce any
1187 // conflicts; this isn't too difficult, but the previous code for doing this
1188 // was incorrect.
1189 //
1190 // Note that if this check finds a live use, BB dominates Succ, so BB is
1191 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1192 // folding the branch isn't profitable in that case anyway.
1193 if (!Succ->getSinglePredecessor()) {
1194 BasicBlock::iterator BBI = BB->begin();
1195 while (isa<PHINode>(*BBI)) {
1196 for (Use &U : BBI->uses()) {
1197 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1198 if (PN->getIncomingBlock(U) != BB)
1199 return false;
1200 } else {
1201 return false;
1202 }
1203 }
1204 ++BBI;
1205 }
1206 }
1207
1208 if (BBPhisMergeable && CommonPred)
1209 LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
1210 << " and " << Succ->getName() << " : "
1211 << CommonPred->getName() << "\n");
1212
1213  // 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner loop
1214 // metadata.
1215 //
1216 // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1217 // current status (that loop metadata is implemented as metadata attached to
1218 // the branch instruction in the loop latch block). To quote from review
1219 // comments, "the current representation of loop metadata (using a loop latch
1220 // terminator attachment) is known to be fundamentally broken. Loop latches
1221 // are not uniquely associated with loops (both in that a latch can be part of
1222 // multiple loops and a loop may have multiple latches). Loop headers are. The
1223 // solution to this problem is also known: Add support for basic block
1224 // metadata, and attach loop metadata to the loop header."
1225 //
1226 // Why bail out:
1227 // In this case, we expect 'BB' is the latch for outer-loop and 'BB->Pred' is
1228  //   the latch for inner-loop (see reason below), so bail out to preserve
1229 // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
1230 // to this inner-loop.
1231 // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
1232 // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
1233 // then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
1234 // one self-looping basic block, which is contradictory with the assumption.
1235 //
1236 // To illustrate how inner-loop metadata is dropped:
1237 //
1238 // CFG Before
1239 //
1240  //   BB is while.cond.exit, attached with loop metadata md2.
1241 // BB->Pred is for.body, attached with loop metadata md1.
1242 //
1243 // entry
1244 // |
1245 // v
1246 // ---> while.cond -------------> while.end
1247 // | |
1248 // | v
1249 // | while.body
1250 // | |
1251 // | v
1252 // | for.body <---- (md1)
1253 // | | |______|
1254 // | v
1255 // | while.cond.exit (md2)
1256 // | |
1257 // |_______|
1258 //
1259 // CFG After
1260 //
1261 // while.cond1 is the merge of while.cond.exit and while.cond above.
1262 // for.body is attached with md2, and md1 is dropped.
1263 // If LoopSimplify runs later (as a part of loop pass), it could create
1264 // dedicated exits for inner-loop (essentially adding `while.cond.exit`
1265  //   back), but it won't see 'md1' nor restore it for the inner-loop.
1266 //
1267 // entry
1268 // |
1269 // v
1270 // ---> while.cond1 -------------> while.end
1271 // | |
1272 // | v
1273 // | while.body
1274 // | |
1275 // | v
1276 // | for.body <---- (md2)
1277 // |_______| |______|
1278 if (Instruction *TI = BB->getTerminator())
1279 if (TI->hasNonDebugLocLoopMetadata())
1280 for (BasicBlock *Pred : predecessors(BB))
1281 if (Instruction *PredTI = Pred->getTerminator())
1282 if (PredTI->hasNonDebugLocLoopMetadata())
1283 return false;
1284
1285 if (BBKillable)
1286 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1287 else if (BBPhisMergeable)
1288 LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);
1289
1291
1292 if (DTU) {
1293 // To avoid processing the same predecessor more than once.
1295 // All predecessors of BB (except the common predecessor) will be moved to
1296 // Succ.
1297 Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1299 predecessors(Succ));
1300 for (auto *PredOfBB : predecessors(BB)) {
1301 // Do not modify those common predecessors of BB and Succ
1302 if (!SuccPreds.contains(PredOfBB))
1303 if (SeenPreds.insert(PredOfBB).second)
1304 Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1305 }
1306
1307 SeenPreds.clear();
1308
1309 for (auto *PredOfBB : predecessors(BB))
1310 // When BB cannot be killed, do not remove the edge between BB and
1311 // CommonPred.
1312 if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
1313 Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1314
1315 if (BBKillable)
1316 Updates.push_back({DominatorTree::Delete, BB, Succ});
1317 }
1318
1319 if (isa<PHINode>(Succ->begin())) {
1320 // If there is more than one pred of succ, and there are PHI nodes in
1321 // the successor, then we need to add incoming edges for the PHI nodes
1322 //
1323 const PredBlockVector BBPreds(predecessors(BB));
1324
1325 // Loop over all of the PHI nodes in the successor of BB.
1326 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1327 PHINode *PN = cast<PHINode>(I);
1328 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
1329 }
1330 }
1331
1332 if (Succ->getSinglePredecessor()) {
1333 // BB is the only predecessor of Succ, so Succ will end up with exactly
1334 // the same predecessors BB had.
1335 // Copy over any phi, debug or lifetime instruction.
1337 Succ->splice(Succ->getFirstNonPHIIt(), BB);
1338 } else {
1339 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1340 // We explicitly check for such uses for merging phis.
1341 assert(PN->use_empty() && "There shouldn't be any uses here!");
1342 PN->eraseFromParent();
1343 }
1344 }
1345
1346 // If the unconditional branch we replaced contains non-debug llvm.loop
1347 // metadata, we add the metadata to the branch instructions in the
1348 // predecessors.
1349 if (Instruction *TI = BB->getTerminator())
1350 if (TI->hasNonDebugLocLoopMetadata()) {
1351 MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop);
1352 for (BasicBlock *Pred : predecessors(BB))
1353 Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1354 }
1355
1356 if (BBKillable) {
1357 // Everything that jumped to BB now goes to Succ.
1358 BB->replaceAllUsesWith(Succ);
1359
1360 if (!Succ->hasName())
1361 Succ->takeName(BB);
1362
1363 // Clear the successor list of BB to match updates applying to DTU later.
1364 if (BB->getTerminator())
1365 BB->back().eraseFromParent();
1366
1367 new UnreachableInst(BB->getContext(), BB);
1368 assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1369 "applying corresponding DTU updates.");
1370 } else if (BBPhisMergeable) {
1371 // Everything except CommonPred that jumped to BB now goes to Succ.
1372 BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
1373 if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
1374 return UseInst->getParent() != CommonPred &&
1375 BBPreds.contains(UseInst->getParent());
1376 return false;
1377 });
1378 }
1379
1380 if (DTU)
1381 DTU->applyUpdates(Updates);
1382
1383 if (BBKillable)
1384 DeleteDeadBlock(BB, DTU);
1385
1386 return true;
1387}
1388
1389static bool
1392 // This implementation doesn't currently consider undef operands
1393 // specially. Theoretically, two phis which are identical except for
1394 // one having an undef where the other doesn't could be collapsed.
1395
1396 bool Changed = false;
1397
1398 // Examine each PHI.
1399 // Note that increment of I must *NOT* be in the iteration_expression, since
1400 // we don't want to immediately advance when we restart from the beginning.
1401 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1402 ++I;
1403 // Is there an identical PHI node in this basic block?
1404 // Note that we only look in the upper square's triangle,
1405 // we already checked that the lower triangle PHI's aren't identical.
1406 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1407 if (ToRemove.contains(DuplicatePN))
1408 continue;
1409 if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1410 continue;
1411 // A duplicate. Replace this PHI with the base PHI.
1412 ++NumPHICSEs;
1413 DuplicatePN->replaceAllUsesWith(PN);
1414 ToRemove.insert(DuplicatePN);
1415 Changed = true;
1416
1417 // The RAUW can change PHIs that we already visited.
1418 I = BB->begin();
1419 break; // Start over from the beginning.
1420 }
1421 }
1422 return Changed;
1423}
1424
1425static bool
1428 // This implementation doesn't currently consider undef operands
1429 // specially. Theoretically, two phis which are identical except for
1430 // one having an undef where the other doesn't could be collapsed.
1431
1432 struct PHIDenseMapInfo {
1433 static PHINode *getEmptyKey() {
1435 }
1436
1437 static PHINode *getTombstoneKey() {
1439 }
1440
1441 static bool isSentinel(PHINode *PN) {
1442 return PN == getEmptyKey() || PN == getTombstoneKey();
1443 }
1444
1445 // WARNING: this logic must be kept in sync with
1446 // Instruction::isIdenticalToWhenDefined()!
1447 static unsigned getHashValueImpl(PHINode *PN) {
1448 // Compute a hash value on the operands. Instcombine will likely have
1449 // sorted them, which helps expose duplicates, but we have to check all
1450 // the operands to be safe in case instcombine hasn't run.
1451 return static_cast<unsigned>(
1453 hash_combine_range(PN->blocks())));
1454 }
1455
1456 static unsigned getHashValue(PHINode *PN) {
1457#ifndef NDEBUG
1458 // If -phicse-debug-hash was specified, return a constant -- this
1459 // will force all hashing to collide, so we'll exhaustively search
1460 // the table for a match, and the assertion in isEqual will fire if
1461 // there's a bug causing equal keys to hash differently.
1462 if (PHICSEDebugHash)
1463 return 0;
1464#endif
1465 return getHashValueImpl(PN);
1466 }
1467
1468 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1469 if (isSentinel(LHS) || isSentinel(RHS))
1470 return LHS == RHS;
1471 return LHS->isIdenticalTo(RHS);
1472 }
1473
1474 static bool isEqual(PHINode *LHS, PHINode *RHS) {
1475 // These comparisons are nontrivial, so assert that equality implies
1476 // hash equality (DenseMap demands this as an invariant).
1477 bool Result = isEqualImpl(LHS, RHS);
1478 assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1480 return Result;
1481 }
1482 };
1483
1484 // Set of unique PHINodes.
1486 PHISet.reserve(4 * PHICSENumPHISmallSize);
1487
1488 // Examine each PHI.
1489 bool Changed = false;
1490 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1491 if (ToRemove.contains(PN))
1492 continue;
1493 auto Inserted = PHISet.insert(PN);
1494 if (!Inserted.second) {
1495 // A duplicate. Replace this PHI with its duplicate.
1496 ++NumPHICSEs;
1497 PN->replaceAllUsesWith(*Inserted.first);
1498 ToRemove.insert(PN);
1499 Changed = true;
1500
1501 // The RAUW can change PHIs that we already visited. Start over from the
1502 // beginning.
1503 PHISet.clear();
1504 I = BB->begin();
1505 }
1506 }
1507
1508 return Changed;
1509}
1510
1521
1525 for (PHINode *PN : ToRemove)
1526 PN->eraseFromParent();
1527 return Changed;
1528}
1529
1531 const DataLayout &DL) {
1532 V = V->stripPointerCasts();
1533
1534 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1535 // TODO: Ideally, this function would not be called if PrefAlign is smaller
1536 // than the current alignment, as the known bits calculation should have
1537 // already taken it into account. However, this is not always the case,
1538 // as computeKnownBits() has a depth limit, while stripPointerCasts()
1539 // doesn't.
1540 Align CurrentAlign = AI->getAlign();
1541 if (PrefAlign <= CurrentAlign)
1542 return CurrentAlign;
1543
1544 // If the preferred alignment is greater than the natural stack alignment
1545 // then don't round up. This avoids dynamic stack realignment.
1546 MaybeAlign StackAlign = DL.getStackAlignment();
1547 if (StackAlign && PrefAlign > *StackAlign)
1548 return CurrentAlign;
1549 AI->setAlignment(PrefAlign);
1550 return PrefAlign;
1551 }
1552
1553 if (auto *GV = dyn_cast<GlobalVariable>(V)) {
1554 // TODO: as above, this shouldn't be necessary.
1555 Align CurrentAlign = GV->getPointerAlignment(DL);
1556 if (PrefAlign <= CurrentAlign)
1557 return CurrentAlign;
1558
1559 // If there is a large requested alignment and we can, bump up the alignment
1560 // of the global. If the memory we set aside for the global may not be the
1561 // memory used by the final program then it is impossible for us to reliably
1562 // enforce the preferred alignment.
1563 if (!GV->canIncreaseAlignment())
1564 return CurrentAlign;
1565
1566 if (GV->isThreadLocal()) {
1567 unsigned MaxTLSAlign = GV->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1568 if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1569 PrefAlign = Align(MaxTLSAlign);
1570 }
1571
1572 GV->setAlignment(PrefAlign);
1573 return PrefAlign;
1574 }
1575
1576 return Align(1);
1577}
1578
1580 const DataLayout &DL,
1581 const Instruction *CxtI,
1582 AssumptionCache *AC,
1583 const DominatorTree *DT) {
1584 assert(V->getType()->isPointerTy() &&
1585 "getOrEnforceKnownAlignment expects a pointer!");
1586
1587 KnownBits Known = computeKnownBits(V, DL, AC, CxtI, DT);
1588 unsigned TrailZ = Known.countMinTrailingZeros();
1589
1590 // Avoid trouble with ridiculously large TrailZ values, such as
1591 // those computed from a null pointer.
1592 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1593 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1594
1595 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1596
1597 if (PrefAlign && *PrefAlign > Alignment)
1598 Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1599
1600 // We don't need to make any adjustment.
1601 return Alignment;
1602}
1603
1604///===---------------------------------------------------------------------===//
1605/// Dbg Intrinsic utilities
1606///
1607
1608/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1610 DIExpression *DIExpr,
1611 PHINode *APN) {
1612 // Since we can't guarantee that the original dbg.declare intrinsic
1613 // is removed by LowerDbgDeclare(), we need to make sure that we are
1614 // not inserting the same dbg.value intrinsic over and over.
1615 SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
1616 findDbgValues(APN, DbgVariableRecords);
1617 for (DbgVariableRecord *DVR : DbgVariableRecords) {
1618 assert(is_contained(DVR->location_ops(), APN));
1619 if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
1620 return true;
1621 }
1622 return false;
1623}
1624
1625/// Check if the alloc size of \p ValTy is large enough to cover the variable
1626/// (or fragment of the variable) described by \p DII.
1627///
1628/// This is primarily intended as a helper for the different
1629/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1630/// describes an alloca'd variable, so we need to use the alloc size of the
1631/// value when doing the comparison. E.g. an i1 value will be identified as
1632/// covering an n-bit fragment, if the store size of i1 is at least n bits.
1634 const DataLayout &DL = DVR->getModule()->getDataLayout();
1635 TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1636 if (std::optional<uint64_t> FragmentSize =
1637 DVR->getExpression()->getActiveBits(DVR->getVariable()))
1638 return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1639
1640 // We can't always calculate the size of the DI variable (e.g. if it is a
1641 // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1642 // instead.
1643 if (DVR->isAddressOfVariable()) {
1644 // DVR should have exactly 1 location when it is an address.
1645 assert(DVR->getNumVariableLocationOps() == 1 &&
1646 "address of variable must have exactly 1 location operand.");
1647 if (auto *AI =
1649 if (std::optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1650 return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1651 }
1652 }
1653 }
1654 // Could not determine size of variable. Conservatively return false.
1655 return false;
1656}
1657
1659 DILocalVariable *DIVar,
1660 DIExpression *DIExpr,
1661 const DebugLoc &NewLoc,
1662 BasicBlock::iterator Instr) {
1664 DbgVariableRecord *DVRec =
1665 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1666 Instr->getParent()->insertDbgRecordBefore(DVRec, Instr);
1667}
1668
1670 int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
1671 return DIExpression::get(DIExpr->getContext(),
1672 DIExpr->getElements().drop_front(NumEltDropped));
1673}
1674
1676 StoreInst *SI, DIBuilder &Builder) {
1677 assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
1678 auto *DIVar = DVR->getVariable();
1679 assert(DIVar && "Missing variable");
1680 auto *DIExpr = DVR->getExpression();
1681 Value *DV = SI->getValueOperand();
1682
1683 DebugLoc NewLoc = getDebugValueLoc(DVR);
1684
1685 // If the alloca describes the variable itself, i.e. the expression in the
1686 // dbg.declare doesn't start with a dereference, we can perform the
1687 // conversion if the value covers the entire fragment of DII.
1688 // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1689 // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1690 // We conservatively ignore other dereferences, because the following two are
1691 // not equivalent:
1692 // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1693 // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1694 // The former is adding 2 to the address of the variable, whereas the latter
1695 // is adding 2 to the value of the variable. As such, we insist on just a
1696 // deref expression.
1697 bool CanConvert =
1698 DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1700 if (CanConvert) {
1701 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1702 SI->getIterator());
1703 return;
1704 }
1705
1706 // FIXME: If storing to a part of the variable described by the dbg.declare,
1707 // then we want to insert a dbg.value for the corresponding fragment.
1708 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
1709 << '\n');
1710
1711 // For now, when there is a store to parts of the variable (but we do not
1712  // know which part) we insert a dbg.value intrinsic to indicate that we
1713 // know nothing about the variable's content.
1714 DV = PoisonValue::get(DV->getType());
1716 DbgVariableRecord *NewDVR =
1717 new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
1718 SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
1719}
1720
1722 DIBuilder &Builder) {
1723 auto *DIVar = DVR->getVariable();
1724 assert(DIVar && "Missing variable");
1725 auto *DIExpr = DVR->getExpression();
1726 DIExpr = dropInitialDeref(DIExpr);
1727 Value *DV = SI->getValueOperand();
1728
1729 DebugLoc NewLoc = getDebugValueLoc(DVR);
1730
1731 insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
1732 SI->getIterator());
1733}
1734
1736 DIBuilder &Builder) {
1737 auto *DIVar = DVR->getVariable();
1738 auto *DIExpr = DVR->getExpression();
1739 assert(DIVar && "Missing variable");
1740
1741 if (!valueCoversEntireFragment(LI->getType(), DVR)) {
1742 // FIXME: If only referring to a part of the variable described by the
1743 // dbg.declare, then we want to insert a DbgVariableRecord for the
1744 // corresponding fragment.
1745 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1746 << *DVR << '\n');
1747 return;
1748 }
1749
1750 DebugLoc NewLoc = getDebugValueLoc(DVR);
1751
1752 // We are now tracking the loaded value instead of the address. In the
1753 // future if multi-location support is added to the IR, it might be
1754 // preferable to keep tracking both the loaded value and the original
1755 // address in case the alloca can not be elided.
1756
1757 // Create a DbgVariableRecord directly and insert.
1759 DbgVariableRecord *DV =
1760 new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
1761 LI->getParent()->insertDbgRecordAfter(DV, LI);
1762}
1763
1764/// Determine whether this alloca is either a VLA or an array.
1765static bool isArray(AllocaInst *AI) {
1766 return AI->isArrayAllocation() ||
1767 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1768}
1769
1770/// Determine whether this alloca is a structure.
1771static bool isStructure(AllocaInst *AI) {
1772 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1773}
1775 DIBuilder &Builder) {
1776 auto *DIVar = DVR->getVariable();
1777 auto *DIExpr = DVR->getExpression();
1778 assert(DIVar && "Missing variable");
1779
1780 if (PhiHasDebugValue(DIVar, DIExpr, APN))
1781 return;
1782
1783 if (!valueCoversEntireFragment(APN->getType(), DVR)) {
1784 // FIXME: If only referring to a part of the variable described by the
1785 // dbg.declare, then we want to insert a DbgVariableRecord for the
1786 // corresponding fragment.
1787 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
1788 << *DVR << '\n');
1789 return;
1790 }
1791
1792 BasicBlock *BB = APN->getParent();
1793 auto InsertionPt = BB->getFirstInsertionPt();
1794
1795 DebugLoc NewLoc = getDebugValueLoc(DVR);
1796
1797 // The block may be a catchswitch block, which does not have a valid
1798 // insertion point.
1799 // FIXME: Insert DbgVariableRecord markers in the successors when appropriate.
1800 if (InsertionPt != BB->end()) {
1801 insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
1802 InsertionPt);
1803 }
1804}
1805
1806/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1807/// of llvm.dbg.value intrinsics.
1809 bool Changed = false;
1810 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1813 for (auto &FI : F) {
1814 for (Instruction &BI : FI) {
1815 if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
1816 Dbgs.push_back(DDI);
1817 for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
1818 if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
1819 DVRs.push_back(&DVR);
1820 }
1821 }
1822 }
1823
1824 if (Dbgs.empty() && DVRs.empty())
1825 return Changed;
1826
1827 auto LowerOne = [&](DbgVariableRecord *DDI) {
1828 AllocaInst *AI =
1829 dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
1830 // If this is an alloca for a scalar variable, insert a dbg.value
1831 // at each load and store to the alloca and erase the dbg.declare.
1832 // The dbg.values allow tracking a variable even if it is not
1833 // stored on the stack, while the dbg.declare can only describe
1834 // the stack slot (and at a lexical-scope granularity). Later
1835 // passes will attempt to elide the stack slot.
1836 if (!AI || isArray(AI) || isStructure(AI))
1837 return;
1838
1839 // A volatile load/store means that the alloca can't be elided anyway.
1840 if (llvm::any_of(AI->users(), [](User *U) -> bool {
1841 if (LoadInst *LI = dyn_cast<LoadInst>(U))
1842 return LI->isVolatile();
1843 if (StoreInst *SI = dyn_cast<StoreInst>(U))
1844 return SI->isVolatile();
1845 return false;
1846 }))
1847 return;
1848
1850 WorkList.push_back(AI);
1851 while (!WorkList.empty()) {
1852 const Value *V = WorkList.pop_back_val();
1853 for (const auto &AIUse : V->uses()) {
1854 User *U = AIUse.getUser();
1855 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1856 if (AIUse.getOperandNo() == 1)
1858 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1859 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1860 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1861 // This is a call by-value or some other instruction that takes a
1862 // pointer to the variable. Insert a *value* intrinsic that describes
1863 // the variable by dereferencing the alloca.
1864 if (!CI->isLifetimeStartOrEnd()) {
1865 DebugLoc NewLoc = getDebugValueLoc(DDI);
1866 auto *DerefExpr =
1867 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1868 insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
1869 DerefExpr, NewLoc,
1870 CI->getIterator());
1871 }
1872 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1873 if (BI->getType()->isPointerTy())
1874 WorkList.push_back(BI);
1875 }
1876 }
1877 }
1878 DDI->eraseFromParent();
1879 Changed = true;
1880 };
1881
1882 for_each(DVRs, LowerOne);
1883
1884 if (Changed)
1885 for (BasicBlock &BB : F)
1887
1888 return Changed;
1889}
1890
1891/// Propagate dbg.value records through the newly inserted PHIs.
// NOTE(review): the line carrying this function's name and first parameter is
// not visible here; from the body it takes the predecessor block \p BB whose
// debug records reference PHIs, plus the list of freshly inserted PHIs.
// For every DbgVariableRecord in \p BB that uses an old PHI as a location
// operand, a clone is created in the destination block of each new PHI that
// forwards that old PHI, with the old PHI operand replaced by the new one.
1893                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
1894  assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
1895  if (InsertedPHIs.size() == 0)
1896    return;
1897
1898  // Map existing PHI nodes to their DbgVariableRecords.
  // (The map declaration on the next original line is elided in this view;
  // DbgValueMap maps old PHINode* -> the record that uses it as a location.)
1900  for (auto &I : *BB) {
1901    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
1902      for (Value *V : DVR.location_ops())
1903        if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1904          DbgValueMap.insert({Loc, &DVR});
1905    }
1906  }
1907  if (DbgValueMap.size() == 0)
1908    return;
1909
1910  // Map a pair of the destination BB and old DbgVariableRecord to the new
1911  // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
1912  // more than one of the inserted PHIs in the same destination BB, we can
1913  // update the same DbgVariableRecord with all the new PHIs instead of creating
1914  // one copy for each.
1916      NewDbgValueMap;
1917  // Then iterate through the new PHIs and look to see if they use one of the
1918  // previously mapped PHIs. If so, create a new DbgVariableRecord that will
1919  // propagate the info through the new PHI. If we use more than one new PHI in
1920  // a single destination BB with the same old dbg.value, merge the updates so
1921  // that we get a single new DbgVariableRecord with all the new PHIs.
1922  for (auto PHI : InsertedPHIs) {
1923    BasicBlock *Parent = PHI->getParent();
1924    // Avoid inserting a debug-info record into an EH block.
1925    if (Parent->getFirstNonPHIIt()->isEHPad())
1926      continue;
1927    for (auto VI : PHI->operand_values()) {
1928      auto V = DbgValueMap.find(VI);
1929      if (V != DbgValueMap.end()) {
1930        DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
1931        auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1932        if (NewDI == NewDbgValueMap.end()) {
          // First time this (block, record) pair is seen: clone the record.
1933          DbgVariableRecord *NewDbgII = DbgII->clone();
1934          NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1935        }
1936        DbgVariableRecord *NewDbgII = NewDI->second;
1937        // If PHI contains VI as an operand more than once, we may
1938        // replaced it in NewDbgII; confirm that it is present.
1939        if (is_contained(NewDbgII->location_ops(), VI))
1940          NewDbgII->replaceVariableLocationOp(VI, PHI);
1941      }
1942    }
1943  }
1944  // Insert the new DbgVariableRecords into their destination blocks.
1945  for (auto DI : NewDbgValueMap) {
1946    BasicBlock *Parent = DI.first.first;
1947    DbgVariableRecord *NewDbgII = DI.second;
1948    auto InsertionPt = Parent->getFirstInsertionPt();
1949    assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1950
1951    Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
1952  }
1953}
1954
// Rewrites every dbg.declare-style DbgVariableRecord of a variable's address:
// prepends \p DIExprFlags / \p Offset to each record's DIExpression and
// re-points the location operand from the old address to the new one.
// Returns true iff at least one declare record was found and rewritten.
// NOTE(review): the first signature line (function name, Address/NewAddress
// parameters) and the DVRDeclares lookup are elided in this view — confirm
// against the full source.
1956                              DIBuilder &Builder, uint8_t DIExprFlags,
1957                              int Offset) {
1959
1960  auto ReplaceOne = [&](DbgVariableRecord *DII) {
1961    assert(DII->getVariable() && "Missing variable");
1962    auto *DIExpr = DII->getExpression();
    // Fold the requested flags/offset into the front of the expression.
1963    DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1964    DII->setExpression(DIExpr);
1965    DII->replaceVariableLocationOp(Address, NewAddress);
1966  };
1967
1968  for_each(DVRDeclares, ReplaceOne);
1969
1970  return !DVRDeclares.empty();
1971}
1972
// Re-targets a single alloca-based debug value record at a replacement
// address. Only handles expressions whose first op is DW_OP_deref (i.e. the
// record describes the variable indirectly through the alloca pointer);
// anything else is left untouched. An optional byte \p Offset is folded in
// before the deref.
// NOTE(review): the opening signature line (function name and leading
// parameter) is elided in this view.
1974                                       DILocalVariable *DIVar,
1975                                       DIExpression *DIExpr, Value *NewAddress,
1976                                       DbgVariableRecord *DVR,
1977                                       DIBuilder &Builder, int Offset) {
1978  assert(DIVar && "Missing variable");
1979
1980  // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
1981  // should do with the alloca pointer is dereference it. Otherwise we don't
1982  // know how to handle it and give up.
1983  if (!DIExpr || DIExpr->getNumElements() < 1 ||
1984      DIExpr->getElement(0) != dwarf::DW_OP_deref)
1985    return;
1986
1987  // Insert the offset before the first deref.
1988  if (Offset)
1989    DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1990
1991  DVR->setExpression(DIExpr);
  // Location operand 0 is the alloca pointer being replaced.
1992  DVR->replaceVariableLocationOp(0u, NewAddress);
1993}
1994
// Finds every debug value record that uses the alloca and re-targets each one
// at the replacement address via updateOneDbgValueForAlloca.
// NOTE(review): the opening signature line (function name, AI and
// NewAllocaAddress parameters) and the DPUsers declaration are elided in this
// view.
1996                                    DIBuilder &Builder, int Offset) {
1998  findDbgValues(AI, DPUsers);
1999
2000  // Replace any DbgVariableRecords that use this alloca.
2001  for (DbgVariableRecord *DVR : DPUsers)
2002    updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
2003                               DVR->getExpression(), NewAllocaAddress, DVR,
2004                               Builder, Offset);
2005}
2006
2007/// Where possible to salvage debug information for \p I do so.
2008/// If not possible mark undef.
2014
// Attempts to salvage the address component of a dbg.assign-style record
// whose address instruction is being deleted. On success the record's address
// and address-expression are rewritten in terms of the salvaged value; if the
// salvage would need extra location operands (which the non-variadic address
// component cannot carry), the address is killed instead.
2015template <typename T> static void salvageDbgAssignAddress(T *Assign) {
2016  Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
2017  // Only instructions can be salvaged at the moment.
2018  if (!I)
2019    return;
2020
2021  assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
2022         "address-expression shouldn't have fragment info");
2023
2024  // The address component of a dbg.assign cannot be variadic.
2025  uint64_t CurrentLocOps = 0;
2026  SmallVector<Value *, 4> AdditionalValues;
  // (The Ops vector declaration on the next original line is elided in this
  // view; it collects the DWARF expression opcodes produced by the salvage.)
2028  Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
2029
2030  // Check if the salvage failed.
2031  if (!NewV)
2032    return;
2033
  // (The SalvagedExpr initialization line is elided in this view; it appends
  // the salvaged ops to argument 0 of the existing address expression.)
2035      Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
2036  assert(!SalvagedExpr->getFragmentInfo().has_value() &&
2037         "address-expression shouldn't have fragment info");
2038
2039  SalvagedExpr = SalvagedExpr->foldConstantMath();
2040
2041  // Salvage succeeds if no additional values are required.
2042  if (AdditionalValues.empty()) {
2043    Assign->setAddress(NewV);
2044    Assign->setAddressExpression(SalvagedExpr);
2045  } else {
2046    Assign->setKillAddress();
2047  }
2048}
2049
// Salvages the debug users (DPUsers) of an instruction \p I that is about to
// be removed: each record's location operand(s) referring to I are rewritten
// in terms of I's operands via salvageDebugInfoImpl, with the transformation
// encoded into the record's DIExpression. Records that cannot be salvaged
// (expression too large, too many operands, or a declare that would need a
// DIArgList) get their location killed. If nothing at all could be salvaged,
// every user's location is killed so no record refers to a deleted value.
// NOTE(review): the function's signature lines are elided in this view.
2052  // These are arbitrary chosen limits on the maximum number of values and the
2053  // maximum size of a debug expression we can salvage up to, used for
2054  // performance reasons.
2055  const unsigned MaxDebugArgs = 16;
2056  const unsigned MaxExpressionSize = 128;
2057  bool Salvaged = false;
2058
2059  for (auto *DVR : DPUsers) {
2060    if (DVR->isDbgAssign()) {
2061      if (DVR->getAddress() == &I) {
        // (The salvageDbgAssignAddress(DVR) call line is elided in this view.)
2063        Salvaged = true;
2064      }
      // The value component is handled by the common path below; skip records
      // whose only reference to I was the address component.
2065      if (DVR->getValue() != &I)
2066        continue;
2067    }
2068
2069    // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
2070    // are implicitly pointing out the value as a DWARF memory location
2071    // description.
2072    bool StackValue =
2074    auto DVRLocation = DVR->location_ops();
2075    assert(
2076        is_contained(DVRLocation, &I) &&
2077        "DbgVariableIntrinsic must use salvaged instruction as its location");
2078    SmallVector<Value *, 4> AdditionalValues;
2079    // 'I' may appear more than once in DVR's location ops, and each use of 'I'
2080    // must be updated in the DIExpression and potentially have additional
2081    // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
2082    // DVRLocation.
2083    Value *Op0 = nullptr;
2084    DIExpression *SalvagedExpr = DVR->getExpression();
2085    auto LocItr = find(DVRLocation, &I);
2086    while (SalvagedExpr && LocItr != DVRLocation.end()) {
2088      unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
2089      uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
2090      Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
2091      if (!Op0)
2092        break;
2093      SalvagedExpr =
2094          DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
2095      LocItr = std::find(++LocItr, DVRLocation.end(), &I);
2096    }
2097    // salvageDebugInfoImpl should fail on examining the first element of
2098    // DbgUsers, or none of them.
2099    if (!Op0)
2100      break;
2101
2102    SalvagedExpr = SalvagedExpr->foldConstantMath();
2103    DVR->replaceVariableLocationOp(&I, Op0);
2104    bool IsValidSalvageExpr =
2105        SalvagedExpr->getNumElements() <= MaxExpressionSize;
2106    if (AdditionalValues.empty() && IsValidSalvageExpr) {
2107      DVR->setExpression(SalvagedExpr);
2108    } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
2109               IsValidSalvageExpr &&
2110               DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
2111                   MaxDebugArgs) {
2112      DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
2113    } else {
2114      // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
2115      // currently only valid for stack value expressions.
2116      // Also do not salvage if the resulting DIArgList would contain an
2117      // unreasonably large number of values.
2118      DVR->setKillLocation();
2119    }
2120    LLVM_DEBUG(dbgs() << "SALVAGE: " << DVR << '\n');
2121    Salvaged = true;
2122  }
2123
2124  if (Salvaged)
2125    return;
2126
  // Total failure: kill every user so nothing refers to the deleted value.
2127  for (auto *DVR : DPUsers)
2128    DVR->setKillLocation();
2129}
2130
// Translates a GEP into DIExpression opcodes appended to \p Opcodes:
// variable-index contributions become DW_OP_LLVM_arg * const multiplier
// additions (with the index values pushed onto \p AdditionalValues), and the
// accumulated constant byte offset is appended last. Returns the GEP's base
// pointer on success, or nullptr if the offset cannot be decomposed.
// NOTE(review): the opening signature line (function name, GEP/DL parameters)
// is elided in this view.
2132                                  uint64_t CurrentLocOps,
2134                                  SmallVectorImpl<Value *> &AdditionalValues) {
2135  unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
2136  // Rewrite a GEP into a DIExpression.
2137  SmallMapVector<Value *, APInt, 4> VariableOffsets;
2138  APInt ConstantOffset(BitWidth, 0);
2139  if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
2140    return nullptr;
2141  if (!VariableOffsets.empty() && !CurrentLocOps) {
    // Expression was implicitly single-location; make location 0 explicit so
    // the variable-offset args can reference additional locations.
2142    Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
2143    CurrentLocOps = 1;
2144  }
2145  for (const auto &Offset : VariableOffsets) {
2146    AdditionalValues.push_back(Offset.first);
2147    assert(Offset.second.isStrictlyPositive() &&
2148           "Expected strictly positive multiplier for offset.");
2149    Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
2150                    Offset.second.getZExtValue(), dwarf::DW_OP_mul,
2151                    dwarf::DW_OP_plus});
2152  }
2153  DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
2154  return GEP->getOperand(0);
2155}
2156
// Maps an LLVM binary opcode to the equivalent DWARF expression operator, or
// returns 0 for opcodes with no DWARF counterpart (the caller treats 0 as
// "cannot salvage"). Note SDiv/SRem map to DW_OP_div/DW_OP_mod; unsigned
// division has no entry here.
// NOTE(review): the function's signature line is elided in this view.
2158  switch (Opcode) {
2159  case Instruction::Add:
2160    return dwarf::DW_OP_plus;
2161  case Instruction::Sub:
2162    return dwarf::DW_OP_minus;
2163  case Instruction::Mul:
2164    return dwarf::DW_OP_mul;
2165  case Instruction::SDiv:
2166    return dwarf::DW_OP_div;
2167  case Instruction::SRem:
2168    return dwarf::DW_OP_mod;
2169  case Instruction::Or:
2170    return dwarf::DW_OP_or;
2171  case Instruction::And:
2172    return dwarf::DW_OP_and;
2173  case Instruction::Xor:
2174    return dwarf::DW_OP_xor;
2175  case Instruction::Shl:
2176    return dwarf::DW_OP_shl;
2177  case Instruction::LShr:
2178    return dwarf::DW_OP_shr;
2179  case Instruction::AShr:
2180    return dwarf::DW_OP_shra;
2181  default:
2182    // TODO: Salvage from each kind of binop we know about.
2183    return 0;
2184  }
2185}
2186
// Appends the opcodes that reference instruction \p I's second operand as an
// extra SSA location: ensures location 0 is explicit when the expression was
// implicitly single-location, emits DW_OP_LLVM_arg for the new slot, and
// records the operand in \p AdditionalValues.
// NOTE(review): the Opcodes parameter line of the signature is elided in this
// view.
2187static void handleSSAValueOperands(uint64_t CurrentLocOps,
2189                                   SmallVectorImpl<Value *> &AdditionalValues,
2190                                   Instruction *I) {
2191  if (!CurrentLocOps) {
2192    Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2193    CurrentLocOps = 1;
2194  }
2195  Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2196  AdditionalValues.push_back(I->getOperand(1));
2197}
2198
2201 SmallVectorImpl<Value *> &AdditionalValues) {
2202 // Handle binary operations with constant integer operands as a special case.
2203 auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2204 // Values wider than 64 bits cannot be represented within a DIExpression.
2205 if (ConstInt && ConstInt->getBitWidth() > 64)
2206 return nullptr;
2207
2208 Instruction::BinaryOps BinOpcode = BI->getOpcode();
2209 // Push any Constant Int operand onto the expression stack.
2210 if (ConstInt) {
2211 uint64_t Val = ConstInt->getSExtValue();
2212 // Add or Sub Instructions with a constant operand can potentially be
2213 // simplified.
2214 if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2215 uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2217 return BI->getOperand(0);
2218 }
2219 Opcodes.append({dwarf::DW_OP_constu, Val});
2220 } else {
2221 handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2222 }
2223
2224 // Add salvaged binary operator to expression stack, if it has a valid
2225 // representation in a DIExpression.
2226 uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2227 if (!DwarfBinOp)
2228 return nullptr;
2229 Opcodes.push_back(DwarfBinOp);
2230 return BI->getOperand(0);
2231}
2232
// Maps an icmp predicate to the equivalent DWARF comparison operator, or
// returns 0 for predicates with no counterpart (caller treats 0 as "cannot
// salvage").
// NOTE(review): the function's signature line is elided in this view.
2234  // The signedness of the operation is implicit in the typed stack, signed and
2235  // unsigned instructions map to the same DWARF opcode.
2236  switch (Pred) {
2237  case CmpInst::ICMP_EQ:
2238    return dwarf::DW_OP_eq;
2239  case CmpInst::ICMP_NE:
2240    return dwarf::DW_OP_ne;
2241  case CmpInst::ICMP_UGT:
2242  case CmpInst::ICMP_SGT:
2243    return dwarf::DW_OP_gt;
2244  case CmpInst::ICMP_UGE:
2245  case CmpInst::ICMP_SGE:
2246    return dwarf::DW_OP_ge;
2247  case CmpInst::ICMP_ULT:
2248  case CmpInst::ICMP_SLT:
2249    return dwarf::DW_OP_lt;
2250  case CmpInst::ICMP_ULE:
2251  case CmpInst::ICMP_SLE:
2252    return dwarf::DW_OP_le;
2253  default:
2254    return 0;
2255  }
2256}
2257
// Translates an icmp into DIExpression opcodes: a constant-int RHS is pushed
// as a signed or unsigned literal depending on the predicate's signedness,
// otherwise the RHS becomes an extra SSA location operand; the predicate is
// then appended as a DWARF comparison op. Returns the LHS operand on success,
// or nullptr when unrepresentable.
// NOTE(review): the opening signature lines are elided in this view.
2260                                     SmallVectorImpl<Value *> &AdditionalValues) {
2261  // Handle icmp operations with constant integer operands as a special case.
2262  auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2263  // Values wider than 64 bits cannot be represented within a DIExpression.
2264  if (ConstInt && ConstInt->getBitWidth() > 64)
2265    return nullptr;
2266  // Push any Constant Int operand onto the expression stack.
2267  if (ConstInt) {
2268    if (Icmp->isSigned())
2269      Opcodes.push_back(dwarf::DW_OP_consts);
2270    else
2271      Opcodes.push_back(dwarf::DW_OP_constu);
2272    uint64_t Val = ConstInt->getSExtValue();
2273    Opcodes.push_back(Val);
2274  } else {
2275    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2276  }
2277
2278  // Add salvaged binary operator to expression stack, if it has a valid
2279  // representation in a DIExpression.
2280  uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2281  if (!DwarfIcmpOp)
2282    return nullptr;
2283  Opcodes.push_back(DwarfIcmpOp);
2284  return Icmp->getOperand(0);
2285}
2286
// Core salvage dispatcher: given instruction \p I that is being deleted,
// appends DIExpression ops that recompute I's value from one of its operands
// and returns that operand, or nullptr if I cannot be expressed. Handles
// casts (no-op, and int trunc/ext via getExtOps), GEPs, binary operators and
// icmps; loads are deliberately not salvaged (see the note at the bottom).
// NOTE(review): the opening signature lines are elided in this view.
2289                                   SmallVectorImpl<Value *> &AdditionalValues) {
2290  auto &M = *I.getModule();
2291  auto &DL = M.getDataLayout();
2292
2293  if (auto *CI = dyn_cast<CastInst>(&I)) {
2294    Value *FromValue = CI->getOperand(0);
2295    // No-op casts are irrelevant for debug info.
2296    if (CI->isNoopCast(DL)) {
2297      return FromValue;
2298    }
2299
2300    Type *Type = CI->getType();
2301    if (Type->isPointerTy())
2302      Type = DL.getIntPtrType(Type);
2303    // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
2304    if (Type->isVectorTy() ||
    // (Two condition lines checking the cast kind are elided in this view.)
2307      return nullptr;
2308
2309    llvm::Type *FromType = FromValue->getType();
2310    if (FromType->isPointerTy())
2311      FromType = DL.getIntPtrType(FromType);
2312
2313    unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2314    unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2315
2316    auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2317                                          isa<SExtInst>(&I));
2318    Ops.append(ExtOps.begin(), ExtOps.end());
2319    return FromValue;
2320  }
2321
2322  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2323    return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2324  if (auto *BI = dyn_cast<BinaryOperator>(&I))
2325    return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2326  if (auto *IC = dyn_cast<ICmpInst>(&I))
2327    return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2328
2329  // *Not* to do: we should not attempt to salvage load instructions,
2330  // because the validity and lifetime of a dbg.value containing
2331  // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2332  return nullptr;
2333}
2334
2335/// A replacement for a dbg.value expression.
2336using DbgValReplacement = std::optional<DIExpression *>;
2337
2338/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2339/// possibly moving/undefing users to prevent use-before-def. Returns true if
2340/// changes are made.
// NOTE(review): the opening signature line (function name) is elided in this
// view; the visible parameters are From, To, DomPoint, the dominator tree,
// and the per-record expression-rewrite callback.
2342    Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2343    function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
2344  // Find debug users of From.
2346  findDbgUsers(&From, DPUsers);
2347  if (DPUsers.empty())
2348    return false;
2349
2350  // Prevent use-before-def of To.
2351  bool Changed = false;
2352
2353  SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
2354  if (isa<Instruction>(&To)) {
2355    bool DomPointAfterFrom = From.getNextNode() == &DomPoint;
2356
2357    // DbgVariableRecord implementation of the above.
2358    for (auto *DVR : DPUsers) {
2359      Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
2360      Instruction *NextNonDebug = MarkedInstr;
2361
2362      // It's common to see a debug user between From and DomPoint. Move it
2363      // after DomPoint to preserve the variable update without any reordering.
2364      if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
2365        LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
2366        DVR->removeFromParent();
2367        DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
2368        Changed = true;
2369
2370        // Users which otherwise aren't dominated by the replacement value must
2371        // be salvaged or deleted.
2372      } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
2373        UndefOrSalvageDVR.insert(DVR);
2374      }
2375    }
2376  }
2377
2378  // Update debug users without use-before-def risk.
2379  for (auto *DVR : DPUsers) {
2380    if (UndefOrSalvageDVR.count(DVR))
2381      continue;
2382
    // The callback may veto the rewrite for this record by returning nullopt.
2383    DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
2384    if (!DVRepl)
2385      continue;
2386
2387    DVR->replaceVariableLocationOp(&From, &To);
2388    DVR->setExpression(*DVRepl);
2389    LLVM_DEBUG(dbgs() << "REWRITE: " << DVR << '\n');
2390    Changed = true;
2391  }
2392
2393  if (!UndefOrSalvageDVR.empty()) {
2394    // Try to salvage the remaining debug users.
2395    salvageDebugInfo(From);
2396    Changed = true;
2397  }
2398
2399  return Changed;
2400}
2401
2402/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2403/// losslessly preserve the bits and semantics of the value. This predicate is
2404/// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2405///
2406/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2407/// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2408/// and also does not allow lossless pointer <-> integer conversions.
// Returns true when a bitcast between FromTy and ToTy would preserve every
// bit and its meaning: identical types trivially qualify; int<->ptr pairs
// qualify only when both have the same bit size and neither is a non-integral
// pointer. Everything else (vectors, floats, aggregates) is conservatively
// rejected.
// NOTE(review): the opening signature line (function name, DL and FromTy
// parameters) is elided in this view.
2410                                         Type *ToTy) {
2411  // Trivially compatible types.
2412  if (FromTy == ToTy)
2413    return true;
2414
2415  // Handle compatible pointer <-> integer conversions.
2416  if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2417    bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2418    bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2419                              !DL.isNonIntegralPointerType(ToTy);
2420    return SameSize && LosslessConversion;
2421  }
2422
2423  // TODO: This is not exhaustive.
2424  return false;
2425}
2426
// Redirects all debug users of \p From to describe \p To instead. No-op
// conversions keep the expression as-is; integer widening keeps it too (a
// debugger reads only the low FromBits bits); integer narrowing appends a
// sign/zero extension (requires the variable's signedness to be known).
// Returns true if any debug user changed.
// NOTE(review): the opening signature line (function name, From/To
// parameters) is elided in this view.
2428                                 Instruction &DomPoint, DominatorTree &DT) {
2429  // Exit early if From has no debug users.
2430  if (!From.isUsedByMetadata())
2431    return false;
2432
2433  assert(&From != &To && "Can't replace something with itself");
2434
2435  Type *FromTy = From.getType();
2436  Type *ToTy = To.getType();
2437
2438  auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2439    return DVR.getExpression();
2440  };
2441
2442  // Handle no-op conversions.
2443  Module &M = *From.getModule();
2444  const DataLayout &DL = M.getDataLayout();
2445  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2446    return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2447
2448  // Handle integer-to-integer widening and narrowing.
2449  // FIXME: Use DW_OP_convert when it's available everywhere.
2450  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2451    uint64_t FromBits = FromTy->getIntegerBitWidth();
2452    uint64_t ToBits = ToTy->getIntegerBitWidth();
2453    assert(FromBits != ToBits && "Unexpected no-op conversion");
2454
2455    // When the width of the result grows, assume that a debugger will only
2456    // access the low `FromBits` bits when inspecting the source variable.
2457    if (FromBits < ToBits)
2458      return rewriteDebugUsers(From, To, DomPoint, DT, IdentityDVR);
2459
2460    // The width of the result has shrunk. Use sign/zero extension to describe
2461    // the source variable's high bits.
2462    auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
2463      DILocalVariable *Var = DVR.getVariable();
2464
2465      // Without knowing signedness, sign/zero extension isn't possible.
2466      auto Signedness = Var->getSignedness();
2467      if (!Signedness)
2468        return std::nullopt;
2469
2470      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2471      return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
2472                                     Signed);
2473    };
2474    return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExtDVR);
2475  }
2476
2477  // TODO: Floating-point conversions, vectors.
2478  return false;
2479}
2480
// Strips debug records from \p I and replaces each of its instruction-typed
// operands with poison (token-typed operands are left alone, since tokens
// cannot be poisoned). The replaced operands are reported via
// \p PoisonedValues so callers can clean them up. Returns true if anything
// changed.
// NOTE(review): the opening signature line (function name) is elided in this
// view.
2482    Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
2483  bool Changed = false;
2484  // RemoveDIs: erase debug-info on this instruction manually.
2485  I->dropDbgRecords();
2486  for (Use &U : I->operands()) {
2487    Value *Op = U.get();
2488    if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
2489      U.set(PoisonValue::get(Op->getType()));
2490      PoisonedValues.push_back(Op);
2491      Changed = true;
2492    }
2493  }
2494
2495  return Changed;
2496}
2497
// Erases every instruction in \p BB before its terminator, except EH pads and
// token-typed instructions, which are kept (only their debug records are
// dropped). Iterates backwards to minimize def-use churn. Returns the number
// of instructions deleted.
// NOTE(review): the function's signature line and a couple of interior lines
// are elided in this view — confirm against the full source.
2499  unsigned NumDeadInst = 0;
2500  // Delete the instructions backwards, as it has a reduced likelihood of
2501  // having to update as many def-use and use-def chains.
2502  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2505
2506  while (EndInst != &BB->front()) {
2507    // Delete the next to last instruction.
2508    Instruction *Inst = &*--EndInst->getIterator();
2509    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      // (The replaceAllUsesWith-poison line is elided in this view.)
2511    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2512      // EHPads can't have DbgVariableRecords attached to them, but it might be
2513      // possible for things with token type.
2514      Inst->dropDbgRecords();
2515      EndInst = Inst;
2516      continue;
2517    }
2518    ++NumDeadInst;
2519    // RemoveDIs: erasing debug-info must be done manually.
2520    Inst->dropDbgRecords();
2521    Inst->eraseFromParent();
2522  }
2523  return NumDeadInst;
2524}
2525
// Replaces instruction \p I and everything after it in its block with an
// `unreachable` terminator: detaches the block from successor PHIs, inserts
// the UnreachableInst (inheriting I's debug location), erases the dead tail
// (RAUW'ing uses with poison), and records CFG edge deletions with the
// optional DomTreeUpdater / MemorySSAUpdater. Returns the number of
// instructions removed.
2526unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2527                                   DomTreeUpdater *DTU,
2528                                   MemorySSAUpdater *MSSAU) {
2529  BasicBlock *BB = I->getParent();
2530
2531  if (MSSAU)
2532    MSSAU->changeToUnreachable(I);
2533
2534  SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
2535
2536  // Loop over all of the successors, removing BB's entry from any PHI
2537  // nodes.
2538  for (BasicBlock *Successor : successors(BB)) {
2539    Successor->removePredecessor(BB, PreserveLCSSA);
2540    if (DTU)
2541      UniqueSuccessors.insert(Successor);
2542  }
2543  auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
2544  UI->setDebugLoc(I->getDebugLoc());
2545
2546  // All instructions after this are dead.
2547  unsigned NumInstrsRemoved = 0;
2548  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2549  while (BBI != BBE) {
2550    if (!BBI->use_empty())
2551      BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2552    BBI++->eraseFromParent();
2553    ++NumInstrsRemoved;
2554  }
2555  if (DTU) {
    // (The Updates vector declaration is elided in this view.)
2557    Updates.reserve(UniqueSuccessors.size());
2558    for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2559      Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2560    DTU->applyUpdates(Updates);
2561  }
2563  return NumInstrsRemoved;
2564}
2565
// Builds (but does not insert) a CallInst equivalent to the invoke \p II:
// same callee, arguments, operand bundles, calling convention, attributes,
// debug location and metadata. Branch-weight profile metadata is converted to
// a single total weight, or dropped if it doesn't fit in 32 bits.
// NOTE(review): the function's signature line is elided in this view.
2567  SmallVector<Value *, 8> Args(II->args());
2569  II->getOperandBundlesAsDefs(OpBundles);
2570  CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2571                                       II->getCalledOperand(), Args, OpBundles);
2572  NewCall->setCallingConv(II->getCallingConv());
2573  NewCall->setAttributes(II->getAttributes());
2574  NewCall->setDebugLoc(II->getDebugLoc());
2575  NewCall->copyMetadata(*II);
2576
2577  // If the invoke had profile metadata, try converting them for CallInst.
2578  uint64_t TotalWeight;
2579  if (NewCall->extractProfTotalWeight(TotalWeight)) {
2580    // Set the total weight if it fits into i32, otherwise reset.
2581    MDBuilder MDB(NewCall->getContext());
2582    auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2583                          ? nullptr
2584                          : MDB.createBranchWeights({uint32_t(TotalWeight)});
2585    NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2586  }
2587
2588  return NewCall;
2589}
2590
2591// changeToCall - Convert the specified invoke into a normal call.
// Inserts the equivalent call before the invoke, branches unconditionally to
// the old normal destination, detaches the unwind destination (updating its
// PHIs and, if a DomTreeUpdater is given, deleting the CFG edge), and erases
// the invoke. Returns the new call.
// NOTE(review): the function's signature lines are elided in this view.
2594  NewCall->takeName(II);
2595  NewCall->insertBefore(II->getIterator());
2596  II->replaceAllUsesWith(NewCall);
2597
2598  // Follow the call by a branch to the normal destination.
2599  BasicBlock *NormalDestBB = II->getNormalDest();
2600  auto *BI = BranchInst::Create(NormalDestBB, II->getIterator());
2601  // Although it takes place after the call itself, the new branch is still
2602  // performing part of the control-flow functionality of the invoke, so we use
2603  // II's DebugLoc.
2604  BI->setDebugLoc(II->getDebugLoc());
2605
2606  // Update PHI nodes in the unwind destination
2607  BasicBlock *BB = II->getParent();
2608  BasicBlock *UnwindDestBB = II->getUnwindDest();
2609  UnwindDestBB->removePredecessor(BB);
2610  II->eraseFromParent();
2611  if (DTU)
2612    DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2613  return NewCall;
2614}
2615
// Converts call \p CI into an invoke that unwinds to \p UnwindEdge: splits
// the block at the call, replaces the split branch with an invoke carrying
// the call's arguments/bundles/attributes, RAUWs the call with the invoke,
// and erases the original call. Returns the block holding the code after the
// (former) call.
// NOTE(review): the opening signature line (function name, CI parameter) is
// elided in this view.
2617                                                   BasicBlock *UnwindEdge,
2618                                                   DomTreeUpdater *DTU) {
2619  BasicBlock *BB = CI->getParent();
2620
2621  // Convert this function call into an invoke instruction. First, split the
2622  // basic block.
2623  BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2624                                 CI->getName() + ".noexc");
2625
2626  // Delete the unconditional branch inserted by SplitBlock
2627  BB->back().eraseFromParent();
2628
2629  // Create the new invoke instruction.
2630  SmallVector<Value *, 8> InvokeArgs(CI->args());
2632
2633  CI->getOperandBundlesAsDefs(OpBundles);
2634
2635  // Note: we're round tripping operand bundles through memory here, and that
2636  // can potentially be avoided with a cleverer API design that we do not have
2637  // as of this time.
2638
2639  InvokeInst *II =
      // (The InvokeInst::Create(...) first-argument line is elided in this
      // view.)
2641                         UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2642  II->setDebugLoc(CI->getDebugLoc());
2643  II->setCallingConv(CI->getCallingConv());
2644  II->setAttributes(CI->getAttributes());
2645  II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2646
2647  if (DTU)
2648    DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2649
2650  // Make sure that anything using the call now uses the invoke! This also
2651  // updates the CallGraph if present, because it uses a WeakTrackingVH.
2653
2654  // Delete the original call
2655  Split->front().eraseFromParent();
2656  return Split;
2657}
2658
// Worklist-based reachability walk from the function's entry block. While
// visiting each reachable block it canonicalizes "obviously unreachable"
// code: false/undef llvm.assume and false llvm.experimental.guard calls,
// calls/stores through null or undef pointers, and calls to no-return
// functions all become `unreachable`; invokes of nounwind callees become
// plain calls; duplicate catchswitch handlers are removed; finally the
// terminator is constant-folded. Visited blocks are accumulated in the
// caller-provided Reachable set. Returns true if anything changed.
// NOTE(review): the opening signature lines (function name, F, Reachable set)
// are elided in this view.
2661                            DomTreeUpdater *DTU = nullptr) {
2663  BasicBlock *BB = &F.front();
2664  Worklist.push_back(BB);
2665  Reachable.insert(BB);
2666  bool Changed = false;
2667  do {
2668    BB = Worklist.pop_back_val();
2669
2670    // Do a quick scan of the basic block, turning any obviously unreachable
2671    // instructions into LLVM unreachable insts. The instruction combining pass
2672    // canonicalizes unreachable insts into stores to null or undef.
2673    for (Instruction &I : *BB) {
2674      if (auto *CI = dyn_cast<CallInst>(&I)) {
2675        Value *Callee = CI->getCalledOperand();
2676        // Handle intrinsic calls.
2677        if (Function *F = dyn_cast<Function>(Callee)) {
2678          auto IntrinsicID = F->getIntrinsicID();
2679          // Assumptions that are known to be false are equivalent to
2680          // unreachable. Also, if the condition is undefined, then we make the
2681          // choice most beneficial to the optimizer, and choose that to also be
2682          // unreachable.
2683          if (IntrinsicID == Intrinsic::assume) {
2684            if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2685              // Don't insert a call to llvm.trap right before the unreachable.
2686              changeToUnreachable(CI, false, DTU);
2687              Changed = true;
2688              break;
2689            }
2690          } else if (IntrinsicID == Intrinsic::experimental_guard) {
2691            // A call to the guard intrinsic bails out of the current
2692            // compilation unit if the predicate passed to it is false. If the
2693            // predicate is a constant false, then we know the guard will bail
2694            // out of the current compile unconditionally, so all code following
2695            // it is dead.
2696            //
2697            // Note: unlike in llvm.assume, it is not "obviously profitable" for
2698            // guards to treat `undef` as `false` since a guard on `undef` can
2699            // still be useful for widening.
2700            if (match(CI->getArgOperand(0), m_Zero()))
2701              if (!isa<UnreachableInst>(CI->getNextNode())) {
2702                changeToUnreachable(CI->getNextNode(), false, DTU);
2703                Changed = true;
2704                break;
2705              }
2706          }
2707        } else if ((isa<ConstantPointerNull>(Callee) &&
2708                    !NullPointerIsDefined(CI->getFunction(),
2709                                          cast<PointerType>(Callee->getType())
2710                                              ->getAddressSpace())) ||
2711                   isa<UndefValue>(Callee)) {
          // Calling through null (where null is undefined) or undef is UB.
2712          changeToUnreachable(CI, false, DTU);
2713          Changed = true;
2714          break;
2715        }
2716        if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2717          // If we found a call to a no-return function, insert an unreachable
2718          // instruction after it. Make sure there isn't *already* one there
2719          // though.
2720          if (!isa<UnreachableInst>(CI->getNextNode())) {
2721            // Don't insert a call to llvm.trap right before the unreachable.
2722            changeToUnreachable(CI->getNextNode(), false, DTU);
2723            Changed = true;
2724          }
2725          break;
2726        }
2727      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2728        // Store to undef and store to null are undefined and used to signal
2729        // that they should be changed to unreachable by passes that can't
2730        // modify the CFG.
2731
2732        // Don't touch volatile stores.
2733        if (SI->isVolatile()) continue;
2734
2735        Value *Ptr = SI->getOperand(1);
2736
2737        if (isa<UndefValue>(Ptr) ||
          // (One condition line — the null-pointer check — is elided in this
          // view.)
2739             !NullPointerIsDefined(SI->getFunction(),
2740                                   SI->getPointerAddressSpace()))) {
2741          changeToUnreachable(SI, false, DTU);
2742          Changed = true;
2743          break;
2744        }
2745      }
2746    }
2747
2748    Instruction *Terminator = BB->getTerminator();
2749    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2750      // Turn invokes that call 'nounwind' functions into ordinary calls.
2751      Value *Callee = II->getCalledOperand();
2752      if ((isa<ConstantPointerNull>(Callee) &&
2753           !NullPointerIsDefined(BB->getParent())) ||
2754          isa<UndefValue>(Callee)) {
2755        changeToUnreachable(II, false, DTU);
2756        Changed = true;
2757      } else {
2758        if (II->doesNotReturn() &&
2759            !isa<UnreachableInst>(II->getNormalDest()->front())) {
2760          // If we found an invoke of a no-return function,
2761          // create a new empty basic block with an `unreachable` terminator,
2762          // and set it as the normal destination for the invoke,
2763          // unless that is already the case.
2764          // Note that the original normal destination could have other uses.
2765          BasicBlock *OrigNormalDest = II->getNormalDest();
2766          OrigNormalDest->removePredecessor(II->getParent());
2767          LLVMContext &Ctx = II->getContext();
2768          BasicBlock *UnreachableNormalDest = BasicBlock::Create(
2769              Ctx, OrigNormalDest->getName() + ".unreachable",
2770              II->getFunction(), OrigNormalDest);
2771          auto *UI = new UnreachableInst(Ctx, UnreachableNormalDest);
2772          UI->setDebugLoc(DebugLoc::getTemporary());
2773          II->setNormalDest(UnreachableNormalDest);
2774          if (DTU)
2775            DTU->applyUpdates(
2776                {{DominatorTree::Delete, BB, OrigNormalDest},
2777                 {DominatorTree::Insert, BB, UnreachableNormalDest}});
2778          Changed = true;
2779        }
2780        if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2781          if (II->use_empty() && !II->mayHaveSideEffects()) {
2782            // jump to the normal destination branch.
2783            BasicBlock *NormalDestBB = II->getNormalDest();
2784            BasicBlock *UnwindDestBB = II->getUnwindDest();
2785            BranchInst::Create(NormalDestBB, II->getIterator());
2786            UnwindDestBB->removePredecessor(II->getParent());
2787            II->eraseFromParent();
2788            if (DTU)
2789              DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2790          } else
2791            changeToCall(II, DTU);
2792          Changed = true;
2793        }
2794      }
2795    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2796      // Remove catchpads which cannot be reached.
2797      struct CatchPadDenseMapInfo {
2798        static CatchPadInst *getEmptyKey() {
          // (The DenseMapInfo-based empty-key return line is elided in this
          // view.)
2800        }
2801
2802        static CatchPadInst *getTombstoneKey() {
          // (The tombstone-key return line is elided in this view.)
2804        }
2805
2806        static unsigned getHashValue(CatchPadInst *CatchPad) {
2807          return static_cast<unsigned>(hash_combine_range(
2808              CatchPad->value_op_begin(), CatchPad->value_op_end()));
2809        }
2810
2811        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2812          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2813              RHS == getEmptyKey() || RHS == getTombstoneKey())
2814            return LHS == RHS;
2815          return LHS->isIdenticalTo(RHS);
2816        }
2817      };
2818
2819      SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2820      // Set of unique CatchPads.
2822                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2823          HandlerSet;
2825      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2826                                             E = CatchSwitch->handler_end();
2827           I != E; ++I) {
2828        BasicBlock *HandlerBB = *I;
2829        if (DTU)
2830          ++NumPerSuccessorCases[HandlerBB];
2831        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHIIt());
2832        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          // Duplicate handler: drop it and rewind the iterator pair.
2833          if (DTU)
2834            --NumPerSuccessorCases[HandlerBB];
2835          CatchSwitch->removeHandler(I);
2836          --I;
2837          --E;
2838          Changed = true;
2839        }
2840      }
2841      if (DTU) {
2842        std::vector<DominatorTree::UpdateType> Updates;
2843        for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2844          if (I.second == 0)
2845            Updates.push_back({DominatorTree::Delete, BB, I.first});
2846        DTU->applyUpdates(Updates);
2847      }
2848    }
2849
2850    Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2851    for (BasicBlock *Successor : successors(BB))
2852      if (Reachable.insert(Successor).second)
2853        Worklist.push_back(Successor);
2854  } while (!Worklist.empty());
2855  return Changed;
2856}
2857
// NOTE(review): the signature line of llvm::removeUnwindEdge (original line
// 2858) is not visible in this rendering. The body below replaces BB's
// unwinding terminator with an equivalent terminator that unwinds to caller,
// and prunes the CFG/DomTree edge to the old unwind destination.
2859 Instruction *TI = BB->getTerminator();
2860
// An invoke with its unwind edge removed degenerates into a plain call.
2861 if (auto *II = dyn_cast<InvokeInst>(TI))
2862 return changeToCall(II, DTU);
2863
2864 Instruction *NewTI;
2865 BasicBlock *UnwindDest;
2866
// cleanupret: rebuild it with a null (unwind-to-caller) destination.
2867 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2868 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
2869 UnwindDest = CRI->getUnwindDest();
// catchswitch: clone it without an unwind destination, keeping all handlers.
2870 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2871 auto *NewCatchSwitch = CatchSwitchInst::Create(
2872 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2873 CatchSwitch->getName(), CatchSwitch->getIterator());
2874 for (BasicBlock *PadBB : CatchSwitch->handlers())
2875 NewCatchSwitch->addHandler(PadBB);
2876
2877 NewTI = NewCatchSwitch;
2878 UnwindDest = CatchSwitch->getUnwindDest();
2879 } else {
2880 llvm_unreachable("Could not find unwind successor");
2881 }
2882
// Transfer name and debug location, splice out the old terminator, and drop
// the CFG/DomTree edge to the former unwind destination.
2883 NewTI->takeName(TI);
2884 NewTI->setDebugLoc(TI->getDebugLoc());
2885 UnwindDest->removePredecessor(BB);
2886 TI->replaceAllUsesWith(NewTI);
2887 TI->eraseFromParent();
2888 if (DTU)
2889 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2890 return NewTI;
2891}
2892
2893/// removeUnreachableBlocks - Remove blocks that are not reachable, even
2894/// if they are in a dead cycle. Return true if a change was made, false
2895/// otherwise.
// NOTE(review): the first signature line (original line 2896) and the
// declaration of the `Reachable` block set (line 2898) are not visible in
// this rendering.
2897 MemorySSAUpdater *MSSAU) {
2899 bool Changed = markAliveBlocks(F, Reachable, DTU);
2900
2901 // If there are unreachable blocks in the CFG...
2902 if (Reachable.size() == F.size())
2903 return Changed;
2904
2905 assert(Reachable.size() < F.size());
2906
2907 // Are there any blocks left to actually delete?
2908 SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2909 for (BasicBlock &BB : F) {
2910 // Skip reachable basic blocks
2911 if (Reachable.count(&BB))
2912 continue;
2913 // Skip already-deleted blocks
2914 if (DTU && DTU->isBBPendingDeletion(&BB))
2915 continue;
2916 BlocksToRemove.insert(&BB);
2917 }
2918
2919 if (BlocksToRemove.empty())
2920 return Changed;
2921
2922 Changed = true;
// NumRemoved is the file-level STATISTIC counter declared at the top of
// this file.
2923 NumRemoved += BlocksToRemove.size();
2924
// Tell MemorySSA about the blocks before they are actually deleted.
2925 if (MSSAU)
2926 MSSAU->removeBlocks(BlocksToRemove);
2927
2928 DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2929
2930 return Changed;
2931}
2932
2933/// If AAOnly is set, only intersect alias analysis metadata and preserve other
2934/// known metadata. Unknown metadata is always dropped.
// Merges instruction J's metadata into K, the instruction that survives a
// CSE/sink/hoist. DoesKMove = true means K may be relocated to J's program
// point, so position-dependent metadata must be merged conservatively.
// NOTE(review): several continuation lines of the original (2937, 3009,
// 3015, 3021, 3039, 3047, 3049) are not visible in this rendering; the gaps
// are flagged inline below.
2935static void combineMetadata(Instruction *K, const Instruction *J,
2936 bool DoesKMove, bool AAOnly = false) {
// (original line 2937, the declaration of the local `Metadata` vector, is
// not shown in this rendering)
2938 K->getAllMetadataOtherThanDebugLoc(Metadata);
2939 for (const auto &MD : Metadata) {
2940 unsigned Kind = MD.first;
2941 MDNode *JMD = J->getMetadata(Kind);
2942 MDNode *KMD = MD.second;
2943
2944 // TODO: Assert that this switch is exhaustive for fixed MD kinds.
2945 switch (Kind) {
2946 default:
2947 K->setMetadata(Kind, nullptr); // Remove unknown metadata
2948 break;
2949 case LLVMContext::MD_dbg:
2950 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2951 case LLVMContext::MD_DIAssignID:
2952 if (!AAOnly)
2953 K->mergeDIAssignID(J);
2954 break;
2955 case LLVMContext::MD_tbaa:
2956 if (DoesKMove)
2957 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2958 break;
2959 case LLVMContext::MD_alias_scope:
2960 if (DoesKMove)
2961 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2962 break;
2963 case LLVMContext::MD_noalias:
2964 case LLVMContext::MD_mem_parallel_loop_access:
2965 if (DoesKMove)
2966 K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2967 break;
2968 case LLVMContext::MD_access_group:
2969 if (DoesKMove)
2970 K->setMetadata(LLVMContext::MD_access_group,
2971 intersectAccessGroups(K, J));
2972 break;
2973 case LLVMContext::MD_range:
2974 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2975 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2976 break;
2977 case LLVMContext::MD_nofpclass:
2978 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2979 K->setMetadata(Kind, MDNode::getMostGenericNoFPClass(JMD, KMD));
2980 break;
2981 case LLVMContext::MD_fpmath:
2982 if (!AAOnly)
2983 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2984 break;
2985 case LLVMContext::MD_invariant_load:
2986 // If K moves, only set the !invariant.load if it is present in both
2987 // instructions.
2988 if (DoesKMove)
2989 K->setMetadata(Kind, JMD);
2990 break;
2991 case LLVMContext::MD_nonnull:
2992 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
2993 K->setMetadata(Kind, JMD);
2994 break;
2995 case LLVMContext::MD_invariant_group:
2996 // Preserve !invariant.group in K.
2997 break;
2998 // Keep empty cases for prof, mmra, memprof, and callsite to prevent them
2999 // from being removed as unknown metadata. The actual merging is handled
3000 // separately below.
3001 case LLVMContext::MD_prof:
3002 case LLVMContext::MD_mmra:
3003 case LLVMContext::MD_memprof:
3004 case LLVMContext::MD_callsite:
3005 break;
3006 case LLVMContext::MD_callee_type:
3007 if (!AAOnly) {
3008 K->setMetadata(LLVMContext::MD_callee_type,
// (original line 3009, the second argument of this setMetadata call, is not
// shown in this rendering)
3010 }
3011 break;
3012 case LLVMContext::MD_align:
3013 if (!AAOnly && (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef)))
3014 K->setMetadata(
// (original line 3015 is not shown in this rendering)
3016 break;
3017 case LLVMContext::MD_dereferenceable:
3018 case LLVMContext::MD_dereferenceable_or_null:
3019 if (!AAOnly && DoesKMove)
3020 K->setMetadata(Kind,
// (original line 3021 is not shown in this rendering)
3022 break;
3023 case LLVMContext::MD_preserve_access_index:
3024 // Preserve !preserve.access.index in K.
3025 break;
3026 case LLVMContext::MD_noundef:
3027 // If K does move, keep noundef if it is present in both instructions.
3028 if (!AAOnly && DoesKMove)
3029 K->setMetadata(Kind, JMD);
3030 break;
3031 case LLVMContext::MD_nontemporal:
3032 // Preserve !nontemporal if it is present on both instructions.
3033 if (!AAOnly)
3034 K->setMetadata(Kind, JMD);
3035 break;
3036 case LLVMContext::MD_noalias_addrspace:
3037 if (DoesKMove)
3038 K->setMetadata(Kind,
// (original line 3039 is not shown in this rendering)
3040 break;
3041 case LLVMContext::MD_nosanitize:
3042 // Preserve !nosanitize if both K and J have it.
3043 K->setMetadata(Kind, JMD);
3044 break;
3045 case LLVMContext::MD_captures:
3046 K->setMetadata(
// (original line 3047 is not shown in this rendering)
3048 K->getContext(), MDNode::toCaptureComponents(JMD) |
// (original line 3049 is not shown in this rendering)
3050 break;
3051 case LLVMContext::MD_alloc_token:
3052 // Preserve !alloc_token if both K and J have it, and they are equal.
3053 if (KMD == JMD)
3054 K->setMetadata(Kind, JMD);
3055 else
3056 K->setMetadata(Kind, nullptr);
3057 break;
3058 }
3059 }
3060 // Set !invariant.group from J if J has it. If both instructions have it
3061 // then we will just pick it from J - even when they are different.
3062 // Also make sure that K is load or store - f.e. combining bitcast with load
3063 // could produce bitcast with invariant.group metadata, which is invalid.
3064 // FIXME: we should try to preserve both invariant.group md if they are
3065 // different, but right now instruction can only have one invariant.group.
3066 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3067 if (isa<LoadInst>(K) || isa<StoreInst>(K))
3068 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3069
3070 // Merge MMRAs.
3071 // This is handled separately because we also want to handle cases where K
3072 // doesn't have tags but J does.
3073 auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3074 auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3075 if (JMMRA || KMMRA) {
3076 K->setMetadata(LLVMContext::MD_mmra,
3077 MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3078 }
3079
3080 // Merge memprof metadata.
3081 // Handle separately to support cases where only one instruction has the
3082 // metadata.
3083 auto *JMemProf = J->getMetadata(LLVMContext::MD_memprof);
3084 auto *KMemProf = K->getMetadata(LLVMContext::MD_memprof);
3085 if (!AAOnly && (JMemProf || KMemProf)) {
3086 K->setMetadata(LLVMContext::MD_memprof,
3087 MDNode::getMergedMemProfMetadata(KMemProf, JMemProf));
3088 }
3089
3090 // Merge callsite metadata.
3091 // Handle separately to support cases where only one instruction has the
3092 // metadata.
3093 auto *JCallSite = J->getMetadata(LLVMContext::MD_callsite);
3094 auto *KCallSite = K->getMetadata(LLVMContext::MD_callsite);
3095 if (!AAOnly && (JCallSite || KCallSite)) {
3096 K->setMetadata(LLVMContext::MD_callsite,
3097 MDNode::getMergedCallsiteMetadata(KCallSite, JCallSite));
3098 }
3099
3100 // Merge prof metadata.
3101 // Handle separately to support cases where only one instruction has the
3102 // metadata.
3103 auto *JProf = J->getMetadata(LLVMContext::MD_prof);
3104 auto *KProf = K->getMetadata(LLVMContext::MD_prof);
3105 if (!AAOnly && (JProf || KProf)) {
3106 K->setMetadata(LLVMContext::MD_prof,
3107 MDNode::getMergedProfMetadata(KProf, JProf, K, J));
3108 }
3109}
3110
// llvm::combineMetadataForCSE — public wrapper over the static
// combineMetadata above; the first signature line (original line 3111) is
// not visible in this rendering. AAOnly defaults to false, so all known
// metadata kinds are merged, not just alias-analysis ones.
3112 bool DoesKMove) {
3113 combineMetadata(K, J, DoesKMove);
3114}
3115
// llvm::combineAAMetadata — signature line (original line 3116) is not
// visible in this rendering. Intersects only the alias-analysis metadata of
// J into K, treating K as if it moves (most conservative merge).
3117 combineMetadata(K, J, /*DoesKMove=*/true, /*AAOnly=*/true);
3118}
3119
3120void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
// Clones Source's metadata onto Dest, a load of the same pointer whose
// result type may differ; kinds that depend on the loaded type are
// translated (nonnull/range) or conditionally kept.
// NOTE(review): original line 3121, the declaration of the local `MD`
// vector, is not shown in this rendering.
3122 Source.getAllMetadata(MD);
3123 MDBuilder MDB(Dest.getContext());
3124 Type *NewType = Dest.getType();
3125 const DataLayout &DL = Source.getDataLayout();
3126 for (const auto &MDPair : MD) {
3127 unsigned ID = MDPair.first;
3128 MDNode *N = MDPair.second;
3129 // Note, essentially every kind of metadata should be preserved here! This
3130 // routine is supposed to clone a load instruction changing *only its type*.
3131 // The only metadata it makes sense to drop is metadata which is invalidated
3132 // when the pointer type changes. This should essentially never be the case
3133 // in LLVM, but we explicitly switch over only known metadata to be
3134 // conservatively correct. If you are adding metadata to LLVM which pertains
3135 // to loads, you almost certainly want to add it here.
3136 switch (ID) {
3137 case LLVMContext::MD_dbg:
3138 case LLVMContext::MD_tbaa:
3139 case LLVMContext::MD_prof:
3140 case LLVMContext::MD_fpmath:
3141 case LLVMContext::MD_tbaa_struct:
3142 case LLVMContext::MD_invariant_load:
3143 case LLVMContext::MD_alias_scope:
3144 case LLVMContext::MD_noalias:
3145 case LLVMContext::MD_nontemporal:
3146 case LLVMContext::MD_mem_parallel_loop_access:
3147 case LLVMContext::MD_access_group:
3148 case LLVMContext::MD_noundef:
3149 case LLVMContext::MD_noalias_addrspace:
3150 // All of these directly apply.
3151 Dest.setMetadata(ID, N);
3152 break;
3153
3154 case LLVMContext::MD_nonnull:
3155 copyNonnullMetadata(Source, N, Dest);
3156 break;
3157
3158 case LLVMContext::MD_align:
3159 case LLVMContext::MD_dereferenceable:
3160 case LLVMContext::MD_dereferenceable_or_null:
3161 // These only directly apply if the new type is also a pointer.
3162 if (NewType->isPointerTy())
3163 Dest.setMetadata(ID, N);
3164 break;
3165
3166 case LLVMContext::MD_range:
3167 copyRangeMetadata(DL, Source, N, Dest);
3168 break;
3169
3170 case LLVMContext::MD_nofpclass:
3171 // This only applies if the floating-point type interpretation. This
3172 // should handle degenerate cases like casting between a scalar and single
3173 // element vector.
3174 if (NewType->getScalarType() == Source.getType()->getScalarType())
3175 Dest.setMetadata(ID, N);
3176 break;
3177 }
3178 }
3179}
3180
// NOTE(review): the signature line of llvm::patchReplacementInstruction
// (original line 3181) is not visible in this rendering. The routine makes
// the replacement value Repl no more "restrictive" than the instruction I
// it replaces: poison-generating flags, call attributes, and metadata are
// intersected conservatively.
3182 auto *ReplInst = dyn_cast<Instruction>(Repl);
3183 if (!ReplInst)
3184 return;
3185
3186 // Patch the replacement so that it is not more restrictive than the value
3187 // being replaced.
3188 WithOverflowInst *UnusedWO;
3189 // When replacing the result of a llvm.*.with.overflow intrinsic with a
3190 // overflowing binary operator, nuw/nsw flags may no longer hold.
3191 if (isa<OverflowingBinaryOperator>(ReplInst) &&
// (original line 3192, the match() operand of this condition, is not shown
// in this rendering)
3193 ReplInst->dropPoisonGeneratingFlags();
3194 // Note that if 'I' is a load being replaced by some operation,
3195 // for example, by an arithmetic operation, then andIRFlags()
3196 // would just erase all math flags from the original arithmetic
3197 // operation, which is clearly not wanted and not needed.
3198 else if (!isa<LoadInst>(I))
3199 ReplInst->andIRFlags(I);
3200
3201 // Handle attributes.
3202 if (auto *CB1 = dyn_cast<CallBase>(ReplInst)) {
3203 if (auto *CB2 = dyn_cast<CallBase>(I)) {
3204 bool Success = CB1->tryIntersectAttributes(CB2);
3205 assert(Success && "We should not be trying to sink callbases "
3206 "with non-intersectable attributes");
3207 // For NDEBUG Compile.
3208 (void)Success;
3209 }
3210 }
3211
3212 // FIXME: If both the original and replacement value are part of the
3213 // same control-flow region (meaning that the execution of one
3214 // guarantees the execution of the other), then we can combine the
3215 // noalias scopes here and do better than the general conservative
3216 // answer used in combineMetadata().
3217
3218 // In general, GVN unifies expressions over different control-flow
3219 // regions, and so we need a conservative combination of the noalias
3220 // scopes.
3221 combineMetadataForCSE(ReplInst, I, false);
3222}
3223
3224template <typename ShouldReplaceFn>
3225static unsigned replaceDominatedUsesWith(Value *From, Value *To,
3226 const ShouldReplaceFn &ShouldReplace) {
3227 assert(From->getType() == To->getType());
3228
3229 unsigned Count = 0;
3230 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3231 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
3232 if (II && II->getIntrinsicID() == Intrinsic::fake_use)
3233 continue;
3234 if (!ShouldReplace(U))
3235 continue;
3236 LLVM_DEBUG(dbgs() << "Replace dominated use of '";
3237 From->printAsOperand(dbgs());
3238 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n");
3239 U.set(To);
3240 ++Count;
3241 }
3242 return Count;
3243}
3244
// NOTE(review): the signature line (original line 3245,
// llvm::replaceNonLocalUsesWith) is not visible in this rendering. Rewrites
// every use of From that lives outside From's own basic block to use To,
// returning the number of uses rewritten.
3246 assert(From->getType() == To->getType());
3247 auto *BB = From->getParent();
3248 unsigned Count = 0;
3249
3250 for (Use &U : llvm::make_early_inc_range(From->uses())) {
3251 auto *I = cast<Instruction>(U.getUser());
// Uses inside From's own block are intentionally left alone.
3252 if (I->getParent() == BB)
3253 continue;
3254 U.set(To);
3255 ++Count;
3256 }
3257 return Count;
3258}
3259
// llvm::replaceDominatedUsesWith overload — first signature line (original
// line 3260) is not visible in this rendering. Replaces uses of From that
// are dominated by the CFG edge Root.
3261 DominatorTree &DT,
3262 const BasicBlockEdge &Root) {
3263 auto Dominates = [&](const Use &U) { return DT.dominates(Root, U); };
3264 return ::replaceDominatedUsesWith(From, To, Dominates);
3265}
3266
// llvm::replaceDominatedUsesWith overload — first signature line (original
// line 3267) is not visible in this rendering. Replaces uses of From that
// are dominated by basic block BB.
3268 DominatorTree &DT,
3269 const BasicBlock *BB) {
3270 auto Dominates = [&](const Use &U) { return DT.dominates(BB, U); };
3271 return ::replaceDominatedUsesWith(From, To, Dominates);
3272}
3273
// llvm::replaceDominatedUsesWith overload — first signature line (original
// line 3274) is not visible in this rendering. Replaces uses of From that
// are dominated by instruction I.
3275 DominatorTree &DT,
3276 const Instruction *I) {
3277 auto Dominates = [&](const Use &U) { return DT.dominates(I, U); };
3278 return ::replaceDominatedUsesWith(From, To, Dominates);
3279}
3280
// llvm::replaceDominatedUsesWithIf — signature first line (original line
// 3281) is not visible in this rendering. Like the edge-dominance overload
// above, but additionally filters each use through a caller-supplied
// predicate.
3282 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root,
3283 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3284 auto DominatesAndShouldReplace = [&](const Use &U) {
3285 return DT.dominates(Root, U) && ShouldReplace(U, To);
3286 };
3287 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3288}
3289
// llvm::replaceDominatedUsesWithIf — signature first line (original line
// 3290) is not visible in this rendering. Block-dominance variant with a
// caller-supplied per-use predicate.
3291 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB,
3292 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3293 auto DominatesAndShouldReplace = [&](const Use &U) {
3294 return DT.dominates(BB, U) && ShouldReplace(U, To);
3295 };
3296 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3297}
3298
// llvm::replaceDominatedUsesWithIf — signature first line (original line
// 3299) is not visible in this rendering. Instruction-dominance variant with
// a caller-supplied per-use predicate.
3300 Value *From, Value *To, DominatorTree &DT, const Instruction *I,
3301 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) {
3302 auto DominatesAndShouldReplace = [&](const Use &U) {
3303 return DT.dominates(I, U) && ShouldReplace(U, To);
3304 };
3305 return ::replaceDominatedUsesWith(From, To, DominatesAndShouldReplace);
3306}
3307
// NOTE(review): the first signature line of llvm::callsGCLeafFunction
// (original line 3308) is not visible in this rendering. Returns true if the
// call site is known not to require a GC safepoint.
3309 const TargetLibraryInfo &TLI) {
3310 // Check if the function is specifically marked as a gc leaf function.
3311 if (Call->hasFnAttr("gc-leaf-function"))
3312 return true;
3313 if (const Function *F = Call->getCalledFunction()) {
3314 if (F->hasFnAttribute("gc-leaf-function"))
3315 return true;
3316
3317 if (auto IID = F->getIntrinsicID()) {
3318 // Most LLVM intrinsics do not take safepoints.
3319 return IID != Intrinsic::experimental_gc_statepoint &&
3320 IID != Intrinsic::experimental_deoptimize &&
3321 IID != Intrinsic::memcpy_element_unordered_atomic &&
3322 IID != Intrinsic::memmove_element_unordered_atomic;
3323 }
3324 }
3325
3326 // Lib calls can be materialized by some passes, and won't be
3327 // marked as 'gc-leaf-function.' All available Libcalls are
3328 // GC-leaf.
3329 LibFunc LF;
3330 if (TLI.getLibFunc(*Call, LF)) {
3331 return TLI.has(LF);
3332 }
3333
3334 return false;
3335}
3336
// NOTE(review): the first signature line of llvm::copyNonnullMetadata
// (original line 3337) is not visible in this rendering. Translates OldLI's
// !nonnull metadata N onto NewLI, whose result type may differ.
3338 LoadInst &NewLI) {
3339 auto *NewTy = NewLI.getType();
3340
3341 // This only directly applies if the new type is also a pointer.
3342 if (NewTy->isPointerTy()) {
3343 NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3344 return;
3345 }
3346
3347 // The only other translation we can do is to integral loads with !range
3348 // metadata.
3349 if (!NewTy->isIntegerTy())
3350 return;
3351
// Express "non-null" as the integer range [null+1, null), i.e. everything
// except the null value.
3352 MDBuilder MDB(NewLI.getContext());
3353 const Value *Ptr = OldLI.getPointerOperand();
3354 auto *ITy = cast<IntegerType>(NewTy);
3355 auto *NullInt = ConstantExpr::getPtrToInt(
// (original line 3356, the null-pointer constant argument of this call, is
// not shown in this rendering)
3357 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3358 NewLI.setMetadata(LLVMContext::MD_range,
3359 MDB.createRange(NonNullInt, NullInt));
3360}
3361
// NOTE(review): the first signature line of llvm::copyRangeMetadata
// (original line 3362) is not visible in this rendering. Translates OldLI's
// !range metadata N onto NewLI, whose result type may differ.
3363 MDNode *N, LoadInst &NewLI) {
3364 auto *NewTy = NewLI.getType();
3365 // Simply copy the metadata if the type did not change.
3366 if (NewTy == OldLI.getType()) {
3367 NewLI.setMetadata(LLVMContext::MD_range, N);
3368 return;
3369 }
3370
3371 // Give up unless it is converted to a pointer where there is a single very
3372 // valuable mapping we can do reliably.
3373 // FIXME: It would be nice to propagate this in more ways, but the type
3374 // conversions make it hard.
3375 if (!NewTy->isPointerTy())
3376 return;
3377
// If the integer range provably excludes zero and the widths match, the
// pointer result is non-null.
3378 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3379 if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3380 !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3381 MDNode *NN = MDNode::get(OldLI.getContext(), {});
3382 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3383 }
3384}
3385
// NOTE(review): the signature line (original line 3386,
// llvm::dropDebugUsers) and the declaration of the `DPUsers` vector (line
// 3387) are not visible in this rendering. Erases every debug-variable
// record that refers to I.
3388 findDbgUsers(&I, DPUsers);
3389 for (auto *DVR : DPUsers)
3390 DVR->eraseFromParent();
3391}
3392
// NOTE(review): the first signature line of llvm::hoistAllInstructionsInto
// (original line 3393) is not visible in this rendering. Moves every
// non-terminator instruction of BB into DomBlock before InsertPt, dropping
// debug info that would become misleading.
3394 BasicBlock *BB) {
3395 // Since we are moving the instructions out of its basic block, we do not
3396 // retain their original debug locations (DILocations) and debug intrinsic
3397 // instructions.
3398 //
3399 // Doing so would degrade the debugging experience.
3400 //
3401 // FIXME: Issue #152767: debug info should also be the same as the
3402 // original branch, **if** the user explicitly indicated that (for sampling
3403 // PGO)
3404 //
3405 // Currently, when hoisting the instructions, we take the following actions:
3406 // - Remove their debug intrinsic instructions.
3407 // - Set their debug locations to the values from the insertion point.
3408 //
3409 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3410 // need to be deleted, is because there will not be any instructions with a
3411 // DILocation in either branch left after performing the transformation. We
3412 // can only insert a dbg.value after the two branches are joined again.
3413 //
3414 // See PR38762, PR39243 for more details.
3415 //
3416 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3417 // encode predicated DIExpressions that yield different results on different
3418 // code paths.
3419
3420 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3421 Instruction *I = &*II;
3422 I->dropUBImplyingAttrsAndMetadata();
3423 if (I->isUsedByMetadata())
3424 dropDebugUsers(*I);
3425 // RemoveDIs: drop debug-info too as the following code does.
3426 I->dropDbgRecords();
3427 if (I->isDebugOrPseudoInst()) {
3428 // Remove DbgInfo and pseudo probe Intrinsics.
3429 II = I->eraseFromParent();
3430 continue;
3431 }
3432 I->setDebugLoc(InsertPt->getDebugLoc());
3433 ++II;
3434 }
// Everything up to (but excluding) BB's terminator moves in one splice.
3435 DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3436 BB->getTerminator()->getIterator());
3437}
3438
// NOTE(review): the first signature line of llvm::getExpressionForConstant
// (original line 3439) is not visible in this rendering. Builds a
// DIExpression describing constant C of type Ty, or returns nullptr when no
// faithful encoding exists.
3440 Type &Ty) {
3441 // Create integer constant expression.
3442 auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3443 const APInt &API = cast<ConstantInt>(&CV)->getValue();
3444 std::optional<int64_t> InitIntOpt;
// i1 is zero-extended so `true` encodes as 1; wider integers are
// sign-extended.
3445 if (API.getBitWidth() == 1)
3446 InitIntOpt = API.tryZExtValue();
3447 else
3448 InitIntOpt = API.trySExtValue();
3449 return InitIntOpt ? DIB.createConstantValueExpression(
3450 static_cast<uint64_t>(*InitIntOpt))
3451 : nullptr;
3452 };
3453
3454 if (isa<ConstantInt>(C))
3455 return createIntegerExpression(C);
3456
// Floats up to 64 bits wide are encoded via their raw bit pattern.
3457 auto *FP = dyn_cast<ConstantFP>(&C);
3458 if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3459 const APFloat &APF = FP->getValueAPF();
3460 APInt const &API = APF.bitcastToAPInt();
3461 if (uint64_t Temp = API.getZExtValue())
3462 return DIB.createConstantValueExpression(Temp);
3463 return DIB.createConstantValueExpression(*API.getRawData());
3464 }
3465
3466 if (!Ty.isPointerTy())
3467 return nullptr;
3468
// (original line 3469, the guard condition for the following return, is not
// shown in this rendering)
3470 return DIB.createConstantValueExpression(0);
3471
3472 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3473 if (CE->getOpcode() == Instruction::IntToPtr) {
3474 const Value *V = CE->getOperand(0);
3475 if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3476 return createIntegerExpression(*CI);
3477 }
3478 return nullptr;
3479}
3480
// NOTE(review): the signature line of llvm::remapDebugVariable (original
// line 3481) is not visible in this rendering. Rewrites the value operands
// (and dbg.assign addresses) of Inst's attached debug records through the
// provided value mapping.
3482 auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3483 for (auto *Op : Set) {
3484 auto I = Mapping.find(Op);
3485 if (I != Mapping.end())
3486 DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3487 }
3488 };
3489 auto RemapAssignAddress = [&Mapping](auto *DA) {
3490 auto I = Mapping.find(DA->getAddress());
3491 if (I != Mapping.end())
3492 DA->setAddress(I->second);
3493 };
3494 for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3495 RemapDebugOperands(&DVR, DVR.location_ops());
// dbg.assign records carry an extra address operand that must be remapped
// separately from the location operands.
3496 if (DVR.isDbgAssign())
3497 RemapAssignAddress(&DVR);
3498 }
3499}
3500
3501namespace {
3502
3503/// A potential constituent of a bitreverse or bswap expression. See
3504/// collectBitParts for a fuller explanation.
3505struct BitPart {
3506 BitPart(Value *P, unsigned BW) : Provider(P) {
// Callers (collectBitParts) populate all BW provenance entries right after
// construction.
3507 Provenance.resize(BW);
3508 }
3509
3510 /// The Value that this is a bitreverse/bswap of.
3511 Value *Provider;
3512
3513 /// The "provenance" of each bit. Provenance[A] = B means that bit A
3514 /// in Provider becomes bit B in the result of this expression.
3515 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3516
// Sentinel meaning "this result bit is known zero / has no provider bit".
3517 enum { Unset = -1 };
3518};
3519
3520} // end anonymous namespace
3521
3522/// Analyze the specified subexpression and see if it is capable of providing
3523/// pieces of a bswap or bitreverse. The subexpression provides a potential
3524/// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3525/// the output of the expression came from a corresponding bit in some other
3526/// value. This function is recursive, and the end result is a mapping of
3527/// bitnumber to bitnumber. It is the caller's responsibility to validate that
3528/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3529///
3530/// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
3531/// that the expression deposits the low byte of %X into the high byte of the
3532/// result and that all other bits are zero. This expression is accepted and a
3533/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3534/// [0-7].
3535///
3536/// For vector types, all analysis is performed at the per-element level. No
3537/// cross-element analysis is supported (shuffle/insertion/reduction), and all
3538/// constant masks must be splatted across all elements.
3539///
3540/// To avoid revisiting values, the BitPart results are memoized into the
3541/// provided map. To avoid unnecessary copying of BitParts, BitParts are
3542/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3543/// store BitParts objects, not pointers. As we need the concept of a nullptr
3544/// BitParts (Value has been analyzed and the analysis failed), we an Optional
3545/// type instead to provide the same functionality.
3546///
3547/// Because we pass around references into \c BPS, we must use a container that
3548/// does not invalidate internal references (std::map instead of DenseMap).
3549static const std::optional<BitPart> &
3550collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3551 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3552 bool &FoundRoot) {
// Memoization: if V was analyzed before, return the cached (possibly empty)
// result.
3553 auto [I, Inserted] = BPS.try_emplace(V);
3554 if (!Inserted)
3555 return I->second;
3556
// `Result` aliases the freshly inserted map slot; every early return below
// therefore also caches the failure (std::nullopt) for V.
3557 auto &Result = I->second;
3558 auto BitWidth = V->getType()->getScalarSizeInBits();
3559
3560 // Can't do integer/elements > 128 bits.
3561 if (BitWidth > 128)
3562 return Result;
3563
3564 // Prevent stack overflow by limiting the recursion depth
// (original line 3565, the depth-limit condition of this block, is not
// shown in this rendering)
3566 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3567 return Result;
3568 }
3569
3570 if (auto *I = dyn_cast<Instruction>(V)) {
3571 Value *X, *Y;
3572 const APInt *C;
3573
3574 // If this is an or instruction, it may be an inner node of the bswap.
3575 if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3576 // Check we have both sources and they are from the same provider.
3577 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3578 Depth + 1, FoundRoot);
3579 if (!A || !A->Provider)
3580 return Result;
3581
3582 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3583 Depth + 1, FoundRoot);
3584 if (!B || A->Provider != B->Provider)
3585 return Result;
3586
3587 // Try and merge the two together.
3588 Result = BitPart(A->Provider, BitWidth);
3589 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
// A bit may come from either side, but not from two different source bits.
3590 if (A->Provenance[BitIdx] != BitPart::Unset &&
3591 B->Provenance[BitIdx] != BitPart::Unset &&
3592 A->Provenance[BitIdx] != B->Provenance[BitIdx])
3593 return Result = std::nullopt;
3594
3595 if (A->Provenance[BitIdx] == BitPart::Unset)
3596 Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3597 else
3598 Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3599 }
3600
3601 return Result;
3602 }
3603
3604 // If this is a logical shift by a constant, recurse then shift the result.
3605 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3606 const APInt &BitShift = *C;
3607
3608 // Ensure the shift amount is defined.
3609 if (BitShift.uge(BitWidth))
3610 return Result;
3611
3612 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3613 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3614 return Result;
3615
3616 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3617 Depth + 1, FoundRoot);
3618 if (!Res)
3619 return Result;
3620 Result = Res;
3621
3622 // Perform the "shift" on BitProvenance.
3623 auto &P = Result->Provenance;
3624 if (I->getOpcode() == Instruction::Shl) {
3625 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3626 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3627 } else {
3628 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3629 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3630 }
3631
3632 return Result;
3633 }
3634
3635 // If this is a logical 'and' with a mask that clears bits, recurse then
3636 // unset the appropriate bits.
3637 if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3638 const APInt &AndMask = *C;
3639
3640 // Check that the mask allows a multiple of 8 bits for a bswap, for an
3641 // early exit.
3642 unsigned NumMaskedBits = AndMask.popcount();
3643 if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3644 return Result;
3645
3646 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3647 Depth + 1, FoundRoot);
3648 if (!Res)
3649 return Result;
3650 Result = Res;
3651
3652 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3653 // If the AndMask is zero for this bit, clear the bit.
3654 if (AndMask[BitIdx] == 0)
3655 Result->Provenance[BitIdx] = BitPart::Unset;
3656 return Result;
3657 }
3658
3659 // If this is a zext instruction zero extend the result.
3660 if (match(V, m_ZExt(m_Value(X)))) {
3661 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3662 Depth + 1, FoundRoot);
3663 if (!Res)
3664 return Result;
3665
3666 Result = BitPart(Res->Provider, BitWidth);
3667 auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3668 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3669 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3670 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3671 Result->Provenance[BitIdx] = BitPart::Unset;
3672 return Result;
3673 }
3674
3675 // If this is a truncate instruction, extract the lower bits.
3676 if (match(V, m_Trunc(m_Value(X)))) {
3677 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3678 Depth + 1, FoundRoot);
3679 if (!Res)
3680 return Result;
3681
3682 Result = BitPart(Res->Provider, BitWidth);
3683 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3684 Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3685 return Result;
3686 }
3687
3688 // BITREVERSE - most likely due to us previous matching a partial
3689 // bitreverse.
3690 if (match(V, m_BitReverse(m_Value(X)))) {
3691 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3692 Depth + 1, FoundRoot);
3693 if (!Res)
3694 return Result;
3695
3696 Result = BitPart(Res->Provider, BitWidth);
3697 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3698 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3699 return Result;
3700 }
3701
3702 // BSWAP - most likely due to us previous matching a partial bswap.
3703 if (match(V, m_BSwap(m_Value(X)))) {
3704 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3705 Depth + 1, FoundRoot);
3706 if (!Res)
3707 return Result;
3708
3709 unsigned ByteWidth = BitWidth / 8;
3710 Result = BitPart(Res->Provider, BitWidth);
3711 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3712 unsigned ByteBitOfs = ByteIdx * 8;
3713 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3714 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3715 Res->Provenance[ByteBitOfs + BitIdx];
3716 }
3717 return Result;
3718 }
3719
3720 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3721 // amount (modulo).
3722 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3723 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3724 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3725 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3726 // We can treat fshr as a fshl by flipping the modulo amount.
3727 unsigned ModAmt = C->urem(BitWidth);
3728 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3729 ModAmt = BitWidth - ModAmt;
3730
3731 // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3732 if (!MatchBitReversals && (ModAmt % 8) != 0)
3733 return Result;
3734
3735 // Check we have both sources and they are from the same provider.
3736 const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3737 Depth + 1, FoundRoot);
3738 if (!LHS || !LHS->Provider)
3739 return Result;
3740
3741 const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3742 Depth + 1, FoundRoot);
3743 if (!RHS || LHS->Provider != RHS->Provider)
3744 return Result;
3745
3746 unsigned StartBitRHS = BitWidth - ModAmt;
3747 Result = BitPart(LHS->Provider, BitWidth);
3748 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3749 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3750 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3751 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3752 return Result;
3753 }
3754 }
3755
3756 // If we've already found a root input value then we're never going to merge
3757 // these back together.
3758 if (FoundRoot)
3759 return Result;
3760
3761 // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3762 // be the root input value to the bswap/bitreverse.
3763 FoundRoot = true;
3764 Result = BitPart(V, BitWidth);
3765 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3766 Result->Provenance[BitIdx] = BitIdx;
3767 return Result;
3768}
3769
/// Return true if moving bit \p From to bit \p To is what a bswap of a
/// \p BitWidth wide value would do. A byte swap relocates whole bytes, so the
/// bit's position inside its byte must be unchanged while the containing byte
/// index is mirrored.
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  // Position within the byte must be preserved.
  if ((From & 7u) != (To & 7u))
    return false;
  // Compare byte indices: the source byte must be the mirror of the
  // destination byte (FromByte == NumBytes - ToByte - 1, i.e. the sum of the
  // two byte indices is NumBytes - 1).
  unsigned FromByte = From / 8;
  unsigned ToByte = To / 8;
  unsigned NumBytes = BitWidth / 8;
  return FromByte + ToByte + 1 == NumBytes;
}
3780
/// Return true if moving bit \p From to bit \p To is what a bitreverse of a
/// \p BitWidth wide value would do: bit i maps to bit (BitWidth - 1 - i).
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  // From == BitWidth - To - 1, rewritten without subtraction.
  return From + To + 1 == BitWidth;
}
3785
3787 Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3788 SmallVectorImpl<Instruction *> &InsertedInsts) {
3789 if (!match(I, m_Or(m_Value(), m_Value())) &&
3790 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3791 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) &&
3792 !match(I, m_BSwap(m_Value())))
3793 return false;
3794 if (!MatchBSwaps && !MatchBitReversals)
3795 return false;
3796 Type *ITy = I->getType();
3797 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() == 1 ||
3798 ITy->getScalarSizeInBits() > 128)
3799 return false; // Can't do integer/elements > 128 bits.
3800
3801 // Try to find all the pieces corresponding to the bswap.
3802 bool FoundRoot = false;
3803 std::map<Value *, std::optional<BitPart>> BPS;
3804 const auto &Res =
3805 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3806 if (!Res)
3807 return false;
3808 ArrayRef<int8_t> BitProvenance = Res->Provenance;
3809 assert(all_of(BitProvenance,
3810 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3811 "Illegal bit provenance index");
3812
3813 // If the upper bits are zero, then attempt to perform as a truncated op.
3814 Type *DemandedTy = ITy;
3815 if (BitProvenance.back() == BitPart::Unset) {
3816 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3817 BitProvenance = BitProvenance.drop_back();
3818 if (BitProvenance.empty())
3819 return false; // TODO - handle null value?
3820 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3821 if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3822 DemandedTy = VectorType::get(DemandedTy, IVecTy);
3823 }
3824
3825 // Check BitProvenance hasn't found a source larger than the result type.
3826 unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3827 if (DemandedBW > ITy->getScalarSizeInBits())
3828 return false;
3829
3830 // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3831 // only byteswap values with an even number of bytes.
3832 APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3833 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3834 bool OKForBitReverse = MatchBitReversals;
3835 for (unsigned BitIdx = 0;
3836 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3837 if (BitProvenance[BitIdx] == BitPart::Unset) {
3838 DemandedMask.clearBit(BitIdx);
3839 continue;
3840 }
3841 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3842 DemandedBW);
3843 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3844 BitIdx, DemandedBW);
3845 }
3846
3847 Intrinsic::ID Intrin;
3848 if (OKForBSwap)
3849 Intrin = Intrinsic::bswap;
3850 else if (OKForBitReverse)
3851 Intrin = Intrinsic::bitreverse;
3852 else
3853 return false;
3854
3855 Function *F =
3856 Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy);
3857 Value *Provider = Res->Provider;
3858
3859 // We may need to truncate the provider.
3860 if (DemandedTy != Provider->getType()) {
3861 auto *Trunc =
3862 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator());
3863 InsertedInsts.push_back(Trunc);
3864 Provider = Trunc;
3865 }
3866
3867 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator());
3868 InsertedInsts.push_back(Result);
3869
3870 if (!DemandedMask.isAllOnes()) {
3871 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3872 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator());
3873 InsertedInsts.push_back(Result);
3874 }
3875
3876 // We may need to zeroextend back to the result type.
3877 if (ITy != Result->getType()) {
3878 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator());
3879 InsertedInsts.push_back(ExtInst);
3880 }
3881
3882 return true;
3883}
3884
3885// CodeGen has special handling for some string functions that may replace
3886// them with target-specific intrinsics. Since that'd skip our interceptors
3887// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3888// we mark affected calls as NoBuiltin, which will disable optimization
3889// in CodeGen.
3891 CallInst *CI, const TargetLibraryInfo *TLI) {
3892 Function *F = CI->getCalledFunction();
3893 LibFunc Func;
3894 if (F && !F->hasLocalLinkage() && F->hasName() &&
3895 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3896 !F->doesNotAccessMemory())
3897 CI->addFnAttr(Attribute::NoBuiltin);
3898}
3899
3901 const auto *Op = I->getOperand(OpIdx);
3902 // We can't have a PHI with a metadata or token type.
3903 if (Op->getType()->isMetadataTy() || Op->getType()->isTokenLikeTy())
3904 return false;
3905
3906 // swifterror pointers can only be used by a load, store, or as a swifterror
3907 // argument; swifterror pointers are not allowed to be used in select or phi
3908 // instructions.
3909 if (Op->isSwiftError())
3910 return false;
3911
3912 // Protected pointer field loads/stores should be paired with the intrinsic
3913 // to avoid unnecessary address escapes.
3914 if (auto *II = dyn_cast<IntrinsicInst>(Op))
3915 if (II->getIntrinsicID() == Intrinsic::protected_field_ptr)
3916 return false;
3917
3918 // Cannot replace alloca argument with phi/select.
3919 if (I->isLifetimeStartOrEnd())
3920 return false;
3921
3922 // Early exit.
3924 return true;
3925
3926 switch (I->getOpcode()) {
3927 default:
3928 return true;
3929 case Instruction::Call:
3930 case Instruction::Invoke: {
3931 const auto &CB = cast<CallBase>(*I);
3932
3933 // Can't handle inline asm. Skip it.
3934 if (CB.isInlineAsm())
3935 return false;
3936
3937 // Constant bundle operands may need to retain their constant-ness for
3938 // correctness.
3939 if (CB.isBundleOperand(OpIdx))
3940 return false;
3941
3942 if (OpIdx < CB.arg_size()) {
3943 // Some variadic intrinsics require constants in the variadic arguments,
3944 // which currently aren't markable as immarg.
3945 if (isa<IntrinsicInst>(CB) &&
3946 OpIdx >= CB.getFunctionType()->getNumParams()) {
3947 // This is known to be OK for stackmap.
3948 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3949 }
3950
3951 // gcroot is a special case, since it requires a constant argument which
3952 // isn't also required to be a simple ConstantInt.
3953 if (CB.getIntrinsicID() == Intrinsic::gcroot)
3954 return false;
3955
3956 // Some intrinsic operands are required to be immediates.
3957 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3958 }
3959
3960 // It is never allowed to replace the call argument to an intrinsic, but it
3961 // may be possible for a call.
3962 return !isa<IntrinsicInst>(CB);
3963 }
3964 case Instruction::ShuffleVector:
3965 // Shufflevector masks are constant.
3966 return OpIdx != 2;
3967 case Instruction::Switch:
3968 case Instruction::ExtractValue:
3969 // All operands apart from the first are constant.
3970 return OpIdx == 0;
3971 case Instruction::InsertValue:
3972 // All operands apart from the first and the second are constant.
3973 return OpIdx < 2;
3974 case Instruction::Alloca:
3975 // Static allocas (constant size in the entry block) are handled by
3976 // prologue/epilogue insertion so they're free anyway. We definitely don't
3977 // want to make them non-constant.
3978 return !cast<AllocaInst>(I)->isStaticAlloca();
3979 case Instruction::GetElementPtr:
3980 if (OpIdx == 0)
3981 return true;
3983 for (auto E = std::next(It, OpIdx); It != E; ++It)
3984 if (It.isStruct())
3985 return false;
3986 return true;
3987 }
3988}
3989
3991 // First: Check if it's a constant
3992 if (Constant *C = dyn_cast<Constant>(Condition))
3993 return ConstantExpr::getNot(C);
3994
3995 // Second: If the condition is already inverted, return the original value
3996 Value *NotCondition;
3997 if (match(Condition, m_Not(m_Value(NotCondition))))
3998 return NotCondition;
3999
4000 BasicBlock *Parent = nullptr;
4001 Instruction *Inst = dyn_cast<Instruction>(Condition);
4002 if (Inst)
4003 Parent = Inst->getParent();
4004 else if (Argument *Arg = dyn_cast<Argument>(Condition))
4005 Parent = &Arg->getParent()->getEntryBlock();
4006 assert(Parent && "Unsupported condition to invert");
4007
4008 // Third: Check all the users for an invert
4009 for (User *U : Condition->users())
4011 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4012 return I;
4013
4014 // Last option: Create a new instruction
4015 auto *Inverted =
4016 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4017 if (Inst && !isa<PHINode>(Inst))
4018 Inverted->insertAfter(Inst->getIterator());
4019 else
4020 Inverted->insertBefore(Parent->getFirstInsertionPt());
4021 return Inverted;
4022}
4023
4025 // Note: We explicitly check for attributes rather than using cover functions
4026 // because some of the cover functions include the logic being implemented.
4027
4028 bool Changed = false;
4029 // readnone + not convergent implies nosync
4030 if (!F.hasFnAttribute(Attribute::NoSync) &&
4031 F.doesNotAccessMemory() && !F.isConvergent()) {
4032 F.setNoSync();
4033 Changed = true;
4034 }
4035
4036 // readonly implies nofree
4037 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
4038 F.setDoesNotFreeMemory();
4039 Changed = true;
4040 }
4041
4042 // willreturn implies mustprogress
4043 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
4044 F.setMustProgress();
4045 Changed = true;
4046 }
4047
4048 // TODO: There are a bunch of cases of restrictive memory effects we
4049 // can infer by inspecting arguments of argmemonly-ish functions.
4050
4051 return Changed;
4052}
4053
4055#ifndef NDEBUG
4056 if (Opcode)
4057 assert(Opcode == I.getOpcode() &&
4058 "can only use mergeFlags on instructions with matching opcodes");
4059 else
4060 Opcode = I.getOpcode();
4061#endif
4063 HasNUW &= I.hasNoUnsignedWrap();
4064 HasNSW &= I.hasNoSignedWrap();
4065 }
4066 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4067 IsDisjoint &= DisjointOp->isDisjoint();
4068}
4069
4071 I.clearSubclassOptionalData();
4072 if (I.getOpcode() == Instruction::Add ||
4073 (I.getOpcode() == Instruction::Mul && AllKnownNonZero)) {
4074 if (HasNUW)
4075 I.setHasNoUnsignedWrap();
4076 if (HasNSW && (AllKnownNonNegative || HasNUW))
4077 I.setHasNoSignedWrap();
4078 }
4079 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
4080 DisjointOp->setIsDisjoint(IsDisjoint);
4081}
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Rewrite undef for PHI
This file implements a class to represent arbitrary precision integral constant values and operations...
ReachingDefInfo InstSet & ToRemove
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isEqual(const Function &Caller, const Function &Callee)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isSentinel(const DWARFDebugNames::AttributeEncoding &AE)
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
static unsigned getHashValueImpl(SimpleValue Val)
Definition EarlyCSE.cpp:229
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS)
Definition EarlyCSE.cpp:350
Hexagon Common GEP
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
This defines the Use class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
SmallDenseMap< BasicBlock *, Value *, 16 > IncomingValueMap
Definition Local.cpp:912
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR)
Check if the alloc size of ValTy is large enough to cover the variable (or fragment of the variable) ...
Definition Local.cpp:1633
static bool markAliveBlocks(Function &F, SmallPtrSetImpl< BasicBlock * > &Reachable, DomTreeUpdater *DTU=nullptr)
Definition Local.cpp:2659
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, Type *ToTy)
Check if a bitcast between a value of type FromTy to type ToTy would losslessly preserve the bits and...
Definition Local.cpp:2409
static bool isStructure(AllocaInst *AI)
Determine whether this alloca is a structure.
Definition Local.cpp:1771
uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode)
Definition Local.cpp:2157
static bool PhiHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr, PHINode *APN)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1609
static void combineMetadata(Instruction *K, const Instruction *J, bool DoesKMove, bool AAOnly=false)
If AAOnly is set, only intersect alias analysis metadata and preserve other known metadata.
Definition Local.cpp:2935
static void handleSSAValueOperands(uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues, Instruction *I)
Definition Local.cpp:2187
std::optional< DIExpression * > DbgValReplacement
A replacement for a dbg.value expression.
Definition Local.cpp:2336
static bool rewriteDebugUsers(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, function_ref< DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr)
Point debug users of From to To using exprs given by RewriteExpr, possibly moving/undefing users to p...
Definition Local.cpp:2341
Value * getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2199
static DIExpression * dropInitialDeref(const DIExpression *DIExpr)
Definition Local.cpp:1669
static void replaceUndefValuesInPhi(PHINode *PN, const IncomingValueMap &IncomingValues)
Replace the incoming undef values to a phi with the values from a block-to-value map.
Definition Local.cpp:977
Value * getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2131
static bool CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds, BasicBlock *&CommonPred)
Definition Local.cpp:1020
Value * getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Opcodes, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2258
static bool CanMergeValues(Value *First, Value *Second)
Return true if we can choose one of these values to use in place of the other.
Definition Local.cpp:846
static bool simplifyAndDCEInstruction(Instruction *I, SmallSetVector< Instruction *, 16 > &WorkList, const DataLayout &DL, const TargetLibraryInfo *TLI)
Definition Local.cpp:663
static bool isArray(AllocaInst *AI)
Determine whether this alloca is either a VLA or an array.
Definition Local.cpp:1765
static bool areAllUsesEqual(Instruction *I)
areAllUsesEqual - Check whether the uses of a value are all the same.
Definition Local.cpp:622
static cl::opt< bool > PHICSEDebugHash("phicse-debug-hash", cl::init(false), cl::Hidden, cl::desc("Perform extra assertion checking to verify that PHINodes's hash " "function is well-behaved w.r.t. its isEqual predicate"))
static void gatherIncomingValuesToPhi(PHINode *PN, const PredBlockVector &BBPreds, IncomingValueMap &IncomingValues)
Create a map from block to value for the operands of a given phi.
Definition Local.cpp:954
uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred)
Definition Local.cpp:2233
static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, unsigned BitWidth)
Definition Local.cpp:3770
static const std::optional< BitPart > & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map< Value *, std::optional< BitPart > > &BPS, int Depth, bool &FoundRoot)
Analyze the specified subexpression and see if it is capable of providing pieces of a bswap or bitrev...
Definition Local.cpp:3550
static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition Local.cpp:1390
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ, const SmallPtrSetImpl< BasicBlock * > &BBPreds)
Return true if we can fold BB, an almost-empty BB ending in an unconditional branch to Succ,...
Definition Local.cpp:855
static cl::opt< unsigned > PHICSENumPHISmallSize("phicse-num-phi-smallsize", cl::init(32), cl::Hidden, cl::desc("When the basic block contains not more than this number of PHI nodes, " "perform a (faster!) exhaustive search instead of set-driven one."))
static void updateOneDbgValueForAlloca(const DebugLoc &Loc, DILocalVariable *DIVar, DIExpression *DIExpr, Value *NewAddress, DbgVariableRecord *DVR, DIBuilder &Builder, int Offset)
Definition Local.cpp:1973
static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB, SmallPtrSetImpl< PHINode * > &ToRemove)
Definition Local.cpp:1426
SmallVector< BasicBlock *, 16 > PredBlockVector
Definition Local.cpp:911
static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr, const DebugLoc &NewLoc, BasicBlock::iterator Instr)
Definition Local.cpp:1658
static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ)
Check whether removing BB will make the phis in its Succ have too many incoming entries.
Definition Local.cpp:1053
static Value * selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, IncomingValueMap &IncomingValues)
Determines the value to use as the phi node input for a block.
Definition Local.cpp:926
static const unsigned BitPartRecursionMaxDepth
Definition Local.cpp:121
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, const PredBlockVector &BBPreds, PHINode *PN, BasicBlock *CommonPred)
Replace a value flowing from a block to a phi with potentially multiple instances of that value flowi...
Definition Local.cpp:1085
static cl::opt< unsigned > MaxPhiEntriesIncreaseAfterRemovingEmptyBlock("max-phi-entries-increase-after-removing-empty-block", cl::init(1000), cl::Hidden, cl::desc("Stop removing an empty block if removing it will introduce more " "than this number of phi entries in its successor"))
static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, unsigned BitWidth)
Definition Local.cpp:3781
static void salvageDbgAssignAddress(T *Assign)
Definition Local.cpp:2015
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1416
Class for arbitrary precision integers.
Definition APInt.h:78
std::optional< uint64_t > tryZExtValue() const
Get zero extended value if possible.
Definition APInt.h:1561
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition APInt.h:1415
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1549
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1679
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1497
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition APInt.h:570
std::optional< int64_t > trySExtValue() const
Get sign extended value if possible.
Definition APInt.h:1583
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1571
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1222
an instruction to allocate memory on the stack
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & back() const
back - Get the last element.
Definition ArrayRef.h:151
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition ArrayRef.h:195
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:201
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
Value handle that asserts if the Value is deleted.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator end()
Definition BasicBlock.h:483
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:470
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
const Instruction & back() const
Definition BasicBlock.h:495
bool hasAddressTaken() const
Returns true if there are any uses of this basic block other than direct branches,...
Definition BasicBlock.h:701
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI void insertDbgRecordBefore(DbgRecord *DR, InstListType::iterator Here)
Insert a DbgRecord into a block at the position given by Here.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI void moveAfter(BasicBlock *MovePos)
Unlink this basic block from its current function and insert it right after MovePos in the function M...
LLVM_ABI bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI void flushTerminatorDbgRecords()
Eject any debug-info trailing at the end of a block.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
LLVM_ABI SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
size_t size() const
Definition BasicBlock.h:491
LLVM_ABI bool hasNPredecessorsOrMore(unsigned N) const
Return true if this block has N predecessors or more.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition BasicBlock.h:673
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a no-op cast from one type to another.
The address of a basic block.
Definition Constants.h:904
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
CallingConv::ID getCallingConv() const
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
mapped_iterator< op_iterator, DerefFnTy > handler_iterator
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
bool isSigned() const
Definition InstrTypes.h:930
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1130
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI void destroyConstant()
Called if some element of this constant is no longer valid.
DIExpression * createConstantValueExpression(uint64_t Val)
Create an expression for a variable that does not have an address, but does have a constant value.
Definition DIBuilder.h:940
DWARF expression.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
ArrayRef< uint64_t > getElements() const
LLVM_ABI std::optional< uint64_t > getActiveBits(DIVariable *Var)
Return the number of bits that have an active value, i.e.
uint64_t getElement(unsigned I) const
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
std::optional< DIBasicType::Signedness > getSignedness() const
Return the signedness of this variable's type, or std::nullopt if this type is neither signed nor uns...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
This represents the llvm.dbg.label instruction.
Instruction * MarkedInstr
Link back to the Instruction that owns this marker.
LLVM_ABI void removeFromParent()
LLVM_ABI Module * getModule()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LLVM_ABI void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
LLVM_ABI unsigned getNumVariableLocationOps() const
bool isAddressOfVariable() const
Does this describe the address of a local variable.
LLVM_ABI DbgVariableRecord * clone() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
A debug info location.
Definition DebugLoc.h:123
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:48
static DebugLoc getTemporary()
Definition DebugLoc.h:160
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
unsigned size() const
Definition DenseMap.h:110
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition DenseMap.h:299
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
LLVM_ABI void deleteBB(BasicBlock *DelBB)
Delete DelBB.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
const BasicBlock & getEntryBlock() const
Definition Function.h:813
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
bool hasDomTree() const
Returns true if it holds a DomTreeT.
void recalculate(FuncT &F)
Notify DTU that the entry block was replaced.
bool isBBPendingDeletion(BasicBlockT *DelBB) const
Returns true if DelBB is awaiting deletion.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2776
iterator_range< simple_ilist< DbgRecord >::iterator > getDbgRecordRange() const
Return a range over the DbgRecords attached to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI bool extractProfTotalWeight(uint64_t &TotalVal) const
Retrieve total raw weight values of a branch.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
LLVM_ABI void dropDbgRecords()
Erase any DbgRecords attached to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Value * getPointerOperand()
LLVM_ABI MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
Definition MDBuilder.cpp:38
LLVM_ABI MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition MDBuilder.cpp:96
Metadata node.
Definition Metadata.h:1080
static LLVM_ABI MDNode * getMostGenericAliasScope(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedCallsiteMetadata(MDNode *A, MDNode *B)
static LLVM_ABI CaptureComponents toCaptureComponents(const MDNode *MD)
Convert !captures metadata to CaptureComponents. MD may be nullptr.
static LLVM_ABI MDNode * getMergedCalleeTypeMetadata(const MDNode *A, const MDNode *B)
static LLVM_ABI MDNode * getMostGenericTBAA(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoaliasAddrspace(MDNode *A, MDNode *B)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
static LLVM_ABI MDNode * getMergedProfMetadata(MDNode *A, MDNode *B, const Instruction *AInstr, const Instruction *BInstr)
Merge !prof metadata from two instructions.
static LLVM_ABI MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericRange(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMergedMemProfMetadata(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * intersect(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * getMostGenericNoFPClass(MDNode *A, MDNode *B)
LLVMContext & getContext() const
Definition Metadata.h:1244
static LLVM_ABI MDNode * fromCaptureComponents(LLVMContext &Ctx, CaptureComponents CC)
Convert CaptureComponents to !captures metadata.
static LLVM_ABI MDNode * getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B)
static LLVM_ABI MDNode * combine(LLVMContext &Ctx, const MMRAMetadata &A, const MMRAMetadata &B)
Combines A and B according to MMRA semantics.
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
iterator end()
Definition MapVector.h:67
iterator find(const KeyT &Key)
Definition MapVector.h:154
bool empty() const
Definition MapVector.h:77
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
LLVM_ABI void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
LLVM_ABI void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
LLVM_ABI void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
Root of the metadata hierarchy.
Definition Metadata.h:64
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition Module.h:278
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
LLVM_ABI Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
void setIncomingValue(unsigned i, Value *V)
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
Vector takeVector()
Clear the SetVector and return the underlying vector.
Definition SetVector.h:94
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
value_type pop_back_val()
Definition SetVector.h:279
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition Type.h:255
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isTokenTy() const
Return true if this is 'token'.
Definition Type.h:234
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:300
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
value_op_iterator value_op_end()
Definition User.h:288
Value * getOperand(unsigned i) const
Definition User.h:207
value_op_iterator value_op_begin()
Definition User.h:285
iterator_range< value_op_iterator > operand_values()
Definition User.h:291
Value wrapper in the Metadata hierarchy.
Definition Metadata.h:459
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
iterator find(const KeyT &Val)
Definition ValueMap.h:160
iterator end()
Definition ValueMap.h:139
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
bool isUsedByMetadata() const
Return true if there is metadata referencing this value.
Definition Value.h:568
LLVM_ABI void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:561
bool use_empty() const
Definition Value.h:346
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
Definition Value.h:829
iterator_range< use_iterator > uses()
Definition Value.h:380
user_iterator_impl< User > user_iterator
Definition Value.h:391
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
void reserve(size_t Size)
Grow the DenseSet so that it can contain at least NumEntries items before resizing again.
Definition DenseSet.h:96
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition Dwarf.h:149
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1763
LLVM_ABI bool RemoveRedundantDbgInstrs(BasicBlock *BB)
Try to remove redundant dbg.value instructions from given basic block.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1730
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2498
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:533
bool succ_empty(const Instruction *I)
Definition CFG.h:257
LLVM_ABI BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition Local.cpp:2616
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
LLVM_ABI unsigned replaceDominatedUsesWithIf(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge, function_ref< bool(const Use &U, const Value *To)> ShouldReplace)
Replace each use of 'From' with 'To' if that use is dominated by the given edge and the callback Shou...
Definition Local.cpp:3281
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
LLVM_ABI unsigned replaceNonLocalUsesWith(Instruction *From, Value *To)
Definition Local.cpp:3245
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1731
auto successors(const MachineBasicBlock *BB)
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition Local.cpp:2592
LLVM_ABI bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI)
Check whether the given call has no side-effects.
LLVM_ABI void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source)
Copy the metadata from the source instruction to the destination (the replacement for the source inst...
Definition Local.cpp:3120
LLVM_ABI void InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------—===// Dbg Intrinsic utilities
Definition Local.cpp:1721
constexpr from_range_t from_range
bool hasNItemsOrLess(IterTy &&Begin, IterTy &&End, unsigned N, Pred &&ShouldBeCounted=[](const decltype(*std::declval< IterTy >()) &) { return true;})
Returns true if the sequence [Begin, End) has N or less items.
Definition STLExtras.h:2649
LLVM_ABI void remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst)
Remap the operands of the debug records attached to Inst, and the operands of Inst itself if it's a d...
Definition Local.cpp:3481
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
auto cast_or_null(const Y &Val)
Definition Casting.h:714
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition Local.cpp:721
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
LLVM_ABI void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified block, which must have no predecessors.
LLVM_ABI bool hasBranchWeightOrigin(const Instruction &I)
Check if Branch Weight Metadata has an "expected" field from an llvm.expect* intrinsic.
LLVM_ABI void insertDebugValuesForPHIs(BasicBlock *BB, SmallVectorImpl< PHINode * > &InsertedPHIs)
Propagate dbg.value intrinsics through the newly inserted PHIs.
Definition Local.cpp:1892
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI MDNode * intersectAccessGroups(const Instruction *Inst1, const Instruction *Inst2)
Compute the access-group list of access groups that Inst1 and Inst2 are both in.
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2481
LLVM_ABI bool canSimplifyInvokeNoUnwind(const Function *F)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instructi...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is known to contain an unconditional branch, and contains no instructions other than PHI nodes,...
Definition Local.cpp:1153
LLVM_ABI bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
Definition Local.cpp:3786
LLVM_ABI MDNode * getValidBranchWeightMDNode(const Instruction &I)
Get the valid branch weights metadata node.
LLVM_ABI Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition Local.cpp:1579
LLVM_ABI bool wouldInstructionBeTriviallyDeadOnUnusedPaths(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction has no side effects on any paths other than whe...
Definition Local.cpp:409
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1808
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI DIExpression * getExpressionForConstant(DIBuilder &DIB, const Constant &C, Type &Ty)
Given a constant, create a debug information expression.
Definition Local.cpp:3439
LLVM_ABI CallInst * createCallMatchingInvoke(InvokeInst *II)
Create a call that matches the invoke II in terms of arguments, attributes, debug information,...
Definition Local.cpp:2566
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1675
LLVM_ABI Instruction * removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
Replace 'BB's terminator with one that does not have an unwind successor block.
Definition Local.cpp:2858
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:421
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI void patchReplacementInstruction(Instruction *I, Value *Repl)
Patch the replacement so that it is not more restrictive than the value being replaced.
Definition Local.cpp:3181
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2050
LLVM_ABI unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Edge)
Replace each use of 'From' with 'To' if that use is dominated by the given edge.
Definition Local.cpp:3260
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2526
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2427
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2287
RNSuccIterator< NodeRef, BlockT, RegionT > succ_begin(NodeRef Node)
LLVM_ABI void combineMetadataForCSE(Instruction *K, const Instruction *J, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
Definition Local.cpp:3111
LLVM_ABI void dropDebugUsers(Instruction &I)
Remove the debug intrinsic instructions for the given instruction.
Definition Local.cpp:3386
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU=nullptr)
BB is a block with one predecessor and its predecessor is known to have one successor (BB!...
Definition Local.cpp:761
LLVM_ABI bool replaceDbgUsesWithUndef(Instruction *I)
Replace all the uses of an SSA value in @llvm.dbg intrinsics with undef.
Definition Local.cpp:610
LLVM_ABI void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt, BasicBlock *BB)
Hoist all of the instructions in the IfBlock to the dominant block DomBlock, by moving its instructio...
Definition Local.cpp:3393
LLVM_ABI void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a range metadata node to a new load instruction.
Definition Local.cpp:3362
LLVM_ABI DebugLoc getDebugValueLoc(DbgVariableRecord *DVR)
Produce a DebugLoc to use for each dbg.declare that is promoted to a dbg.value.
LLVM_ABI void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI)
Copy a nonnull metadata node to a new load instruction.
Definition Local.cpp:3337
LLVM_ABI bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx)
Given an instruction, is it legal to set operand OpIdx to a non-constant value?
Definition Local.cpp:3900
DWARFExpression::Operation Op
LLVM_ABI void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, int Offset=0)
Replaces multiple dbg.value records when the alloca it describes is replaced with a new value.
Definition Local.cpp:1995
LLVM_ABI Align tryEnforceAlignment(Value *V, Align PrefAlign, const DataLayout &DL)
If the specified pointer points to an object that we control, try to modify the object's alignment to...
Definition Local.cpp:1530
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
Definition Local.cpp:548
constexpr unsigned BitWidth
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2009
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Finds dbg.declare records declaring local variables as living in the memory that 'V' points to.
Definition DebugInfo.cpp:48
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
LLVM_ABI void combineAAMetadata(Instruction *K, const Instruction *J)
Combine metadata of two instructions, where instruction J is a memory access that has been merged int...
Definition Local.cpp:3116
LLVM_ABI bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr)
If the specified value is an effectively dead PHI node, due to being a def-use chain of single-use no...
Definition Local.cpp:641
LLVM_ABI bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize t...
Definition Local.cpp:4024
LLVM_ABI Value * invertCondition(Value *Condition)
Invert the given true/false value, possibly reusing an existing copy.
Definition Local.cpp:3990
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition Hashing.h:592
LLVM_ABI void DeleteDeadBlocks(ArrayRef< BasicBlock * > BBs, DomTreeUpdater *DTU=nullptr, bool KeepOneInputPHIs=false)
Delete the specified blocks from BB.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
Definition Local.cpp:3890
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition Local.cpp:2896
LLVM_ABI bool EliminateDuplicatePHINodes(BasicBlock *BB)
Check for and eliminate duplicate PHI nodes in this block.
Definition Local.cpp:1522
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI)
Return true if this call calls a gc leaf function.
Definition Local.cpp:3308
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition Hashing.h:466
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1955
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
#define NDEBUG
Definition regutils.h:48
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
An information struct used to provide DenseMap with the various necessary components for a given valu...
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:255
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
std::optional< unsigned > Opcode
Opcode of merged instructions.
Definition Local.h:591
LLVM_ABI void mergeFlags(Instruction &I)
Merge in the no-wrap flags from I.
Definition Local.cpp:4054
LLVM_ABI void applyFlags(Instruction &I)
Apply the no-wrap flags to I if applicable.
Definition Local.cpp:4070
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276