// Excerpt of AssignmentTrackingAnalysis.cpp from LLVM 18.0.0git.
// (Doxygen page header "Go to the documentation of this file" retained here
// as a comment so the file remains valid C++.)
12#include "llvm/IR/BasicBlock.h"
13#include "llvm/IR/DataLayout.h"
14#include "llvm/IR/DebugInfo.h"
15#include "llvm/IR/Function.h"
16#include "llvm/IR/Instruction.h"
18#include "llvm/IR/PassManager.h"
19#include "llvm/IR/PrintPasses.h"
25#include <assert.h>
26#include <cstdint>
27#include <optional>
28#include <queue>
29#include <sstream>
30#include <unordered_map>
31
32using namespace llvm;
33#define DEBUG_TYPE "debug-ata"
34
35STATISTIC(NumDefsScanned, "Number of dbg locs that get scanned for removal");
36STATISTIC(NumDefsRemoved, "Number of dbg locs removed");
37STATISTIC(NumWedgesScanned, "Number of dbg wedges scanned");
38STATISTIC(NumWedgesChanged, "Number of dbg wedges changed");
39
41 MaxNumBlocks("debug-ata-max-blocks", cl::init(10000),
42 cl::desc("Maximum num basic blocks before debug info dropped"),
44/// Option for debugging the pass, determines if the memory location fragment
45/// filling happens after generating the variable locations.
46static cl::opt<bool> EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true),
48/// Print the results of the analysis. Respects -filter-print-funcs.
49static cl::opt<bool> PrintResults("print-debug-ata", cl::init(false),
51
52/// Coalesce adjacent dbg locs describing memory locations that have contiguous
53/// fragments. This reduces the cost of LiveDebugValues which does SSA
54/// construction for each explicitly stated variable fragment.
56 CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags", cl::Hidden);
57
58// Implicit conversions are disabled for enum class types, so unfortunately we
59// need to create a DenseMapInfo wrapper around the specified underlying type.
60template <> struct llvm::DenseMapInfo<VariableID> {
62 static inline VariableID getEmptyKey() {
63 return static_cast<VariableID>(Wrapped::getEmptyKey());
64 }
65 static inline VariableID getTombstoneKey() {
66 return static_cast<VariableID>(Wrapped::getTombstoneKey());
67 }
68 static unsigned getHashValue(const VariableID &Val) {
69 return Wrapped::getHashValue(static_cast<unsigned>(Val));
70 }
71 static bool isEqual(const VariableID &LHS, const VariableID &RHS) {
72 return LHS == RHS;
73 }
74};
75
76/// Helper class to build FunctionVarLocs, since that class isn't easy to
77/// modify. TODO: There's not a great deal of value in the split, it could be
78/// worth merging the two classes.
80 friend FunctionVarLocs;
82 // Use an unordered_map so we don't invalidate iterators after
83 // insert/modifications.
84 std::unordered_map<const Instruction *, SmallVector<VarLocInfo>>
85 VarLocsBeforeInst;
86
87 SmallVector<VarLocInfo> SingleLocVars;
88
89public:
90 unsigned getNumVariables() const { return Variables.size(); }
91
92 /// Find or insert \p V and return the ID.
94 return static_cast<VariableID>(Variables.insert(V));
95 }
96
97 /// Get a variable from its \p ID.
99 return Variables[static_cast<unsigned>(ID)];
100 }
101
102 /// Return ptr to wedge of defs or nullptr if no defs come just before /p
103 /// Before.
105 auto R = VarLocsBeforeInst.find(Before);
106 if (R == VarLocsBeforeInst.end())
107 return nullptr;
108 return &R->second;
109 }
110
111 /// Replace the defs that come just before /p Before with /p Wedge.
113 VarLocsBeforeInst[Before] = std::move(Wedge);
114 }
115
116 /// Add a def for a variable that is valid for its lifetime.
119 VarLocInfo VarLoc;
120 VarLoc.VariableID = insertVariable(Var);
121 VarLoc.Expr = Expr;
122 VarLoc.DL = DL;
123 VarLoc.Values = R;
124 SingleLocVars.emplace_back(VarLoc);
125 }
126
127 /// Add a def to the wedge of defs just before /p Before.
130 VarLocInfo VarLoc;
131 VarLoc.VariableID = insertVariable(Var);
132 VarLoc.Expr = Expr;
133 VarLoc.DL = DL;
134 VarLoc.Values = R;
135 VarLocsBeforeInst[Before].emplace_back(VarLoc);
136 }
137};
138
140 // Print the variable table first. TODO: Sorting by variable could make the
141 // output more stable?
142 unsigned Counter = -1;
143 OS << "=== Variables ===\n";
144 for (const DebugVariable &V : Variables) {
145 ++Counter;
146 // Skip first entry because it is a dummy entry.
147 if (Counter == 0) {
148 continue;
149 }
150 OS << "[" << Counter << "] " << V.getVariable()->getName();
151 if (auto F = V.getFragment())
152 OS << " bits [" << F->OffsetInBits << ", "
153 << F->OffsetInBits + F->SizeInBits << ")";
154 if (const auto *IA = V.getInlinedAt())
155 OS << " inlined-at " << *IA;
156 OS << "\n";
157 }
158
159 auto PrintLoc = [&OS](const VarLocInfo &Loc) {
160 OS << "DEF Var=[" << (unsigned)Loc.VariableID << "]"
161 << " Expr=" << *Loc.Expr << " Values=(";
162 for (auto *Op : Loc.Values.location_ops()) {
163 errs() << Op->getName() << " ";
164 }
165 errs() << ")\n";
166 };
167
168 // Print the single location variables.
169 OS << "=== Single location vars ===\n";
170 for (auto It = single_locs_begin(), End = single_locs_end(); It != End;
171 ++It) {
172 PrintLoc(*It);
173 }
174
175 // Print the non-single-location defs in line with IR.
176 OS << "=== In-line variable defs ===";
177 for (const BasicBlock &BB : Fn) {
178 OS << "\n" << BB.getName() << ":\n";
179 for (const Instruction &I : BB) {
180 for (auto It = locs_begin(&I), End = locs_end(&I); It != End; ++It) {
181 PrintLoc(*It);
182 }
183 OS << I << "\n";
184 }
185 }
186}
187
189 // Add the single-location variables first.
190 for (const auto &VarLoc : Builder.SingleLocVars)
191 VarLocRecords.emplace_back(VarLoc);
192 // Mark the end of the section.
193 SingleVarLocEnd = VarLocRecords.size();
194
195 // Insert a contiguous block of VarLocInfos for each instruction, mapping it
196 // to the start and end position in the vector with VarLocsBeforeInst.
197 for (auto &P : Builder.VarLocsBeforeInst) {
198 unsigned BlockStart = VarLocRecords.size();
199 for (const VarLocInfo &VarLoc : P.second)
200 VarLocRecords.emplace_back(VarLoc);
201 unsigned BlockEnd = VarLocRecords.size();
202 // Record the start and end indices.
203 if (BlockEnd != BlockStart)
204 VarLocsBeforeInst[P.first] = {BlockStart, BlockEnd};
205 }
206
207 // Copy the Variables vector from the builder's UniqueVector.
208 assert(Variables.empty() && "Expect clear before init");
209 // UniqueVectors IDs are one-based (which means the VarLocInfo VarID values
210 // are one-based) so reserve an extra and insert a dummy.
211 Variables.reserve(Builder.Variables.size() + 1);
212 Variables.push_back(DebugVariable(nullptr, std::nullopt, nullptr));
213 Variables.append(Builder.Variables.begin(), Builder.Variables.end());
214}
215
217 Variables.clear();
218 VarLocRecords.clear();
219 VarLocsBeforeInst.clear();
220 SingleVarLocEnd = 0;
221}
222
223/// Walk backwards along constant GEPs and bitcasts to the base storage from \p
224/// Start as far as possible. Prepend \Expression with the offset and append it
225/// with a DW_OP_deref that haes been implicit until now. Returns the walked-to
226/// value and modified expression.
227static std::pair<Value *, DIExpression *>
230 APInt OffsetInBytes(DL.getTypeSizeInBits(Start->getType()), false);
231 Value *End =
232 Start->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetInBytes);
234 if (OffsetInBytes.getBoolValue()) {
235 Ops = {dwarf::DW_OP_plus_uconst, OffsetInBytes.getZExtValue()};
237 Expression, Ops, /*StackValue=*/false, /*EntryValue=*/false);
238 }
239 Expression = DIExpression::append(Expression, {dwarf::DW_OP_deref});
240 return {End, Expression};
241}
242
243/// Extract the offset used in \p DIExpr. Returns std::nullopt if the expression
244/// doesn't explicitly describe a memory location with DW_OP_deref or if the
245/// expression is too complex to interpret.
246static std::optional<int64_t>
248 int64_t Offset = 0;
249 const unsigned NumElements = DIExpr->getNumElements();
250 const auto Elements = DIExpr->getElements();
251 unsigned ExpectedDerefIdx = 0;
252 // Extract the offset.
253 if (NumElements > 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
254 Offset = Elements[1];
255 ExpectedDerefIdx = 2;
256 } else if (NumElements > 3 && Elements[0] == dwarf::DW_OP_constu) {
257 ExpectedDerefIdx = 3;
258 if (Elements[2] == dwarf::DW_OP_plus)
259 Offset = Elements[1];
260 else if (Elements[2] == dwarf::DW_OP_minus)
261 Offset = -Elements[1];
262 else
263 return std::nullopt;
264 }
265
266 // If that's all there is it means there's no deref.
267 if (ExpectedDerefIdx >= NumElements)
268 return std::nullopt;
269
270 // Check the next element is DW_OP_deref - otherwise this is too complex or
271 // isn't a deref expression.
272 if (Elements[ExpectedDerefIdx] != dwarf::DW_OP_deref)
273 return std::nullopt;
274
275 // Check the final operation is either the DW_OP_deref or is a fragment.
276 if (NumElements == ExpectedDerefIdx + 1)
277 return Offset; // Ends with deref.
278 unsigned ExpectedFragFirstIdx = ExpectedDerefIdx + 1;
279 unsigned ExpectedFragFinalIdx = ExpectedFragFirstIdx + 2;
280 if (NumElements == ExpectedFragFinalIdx + 1 &&
281 Elements[ExpectedFragFirstIdx] == dwarf::DW_OP_LLVM_fragment)
282 return Offset; // Ends with deref + fragment.
283
284 // Don't bother trying to interpret anything more complex.
285 return std::nullopt;
286}
287
288/// A whole (unfragmented) source variable.
289using DebugAggregate = std::pair<const DILocalVariable *, const DILocation *>;
291 return DebugAggregate(DII->getVariable(), DII->getDebugLoc().getInlinedAt());
292}
294 return DebugAggregate(Var.getVariable(), Var.getInlinedAt());
295}
296
298 // Enabling fragment coalescing reduces compiler run time when instruction
299 // referencing is enabled. However, it may cause LiveDebugVariables to create
300 // incorrect locations. Since instruction-referencing mode effectively
301 // bypasses LiveDebugVariables we only enable coalescing if the cl::opt flag
302 // has not been explicitly set and instruction-referencing is turned on.
306 Triple(F.getParent()->getTargetTriple()));
308 return true;
310 return false;
311 }
312 llvm_unreachable("Unknown boolOrDefault value");
313}
314
315namespace {
316/// In dwarf emission, the following sequence
317/// 1. dbg.value ... Fragment(0, 64)
318/// 2. dbg.value ... Fragment(0, 32)
319/// effectively sets Fragment(32, 32) to undef (each def sets all bits not in
320/// the intersection of the fragments to having "no location"). This makes
321/// sense for implicit location values because splitting the computed values
322/// could be troublesome, and is probably quite uncommon. When we convert
323/// dbg.assigns to dbg.value+deref this kind of thing is common, and describing
324/// a location (memory) rather than a value means we don't need to worry about
325/// splitting any values, so we try to recover the rest of the fragment
326/// location here.
327/// This class performs a(nother) dataflow analysis over the function, adding
328/// variable locations so that any bits of a variable with a memory location
329/// have that location explicitly reinstated at each subsequent variable
330/// location definition that that doesn't overwrite those bits. i.e. after a
331/// variable location def, insert new defs for the memory location with
332/// fragments for the difference of "all bits currently in memory" and "the
333/// fragment of the second def".
334class MemLocFragmentFill {
335 Function &Fn;
336 FunctionVarLocsBuilder *FnVarLocs;
337 const DenseSet<DebugAggregate> *VarsWithStackSlot;
338 bool CoalesceAdjacentFragments;
339
340 // 0 = no memory location.
341 using BaseAddress = unsigned;
342 using OffsetInBitsTy = unsigned;
344 using FragsInMemMap = IntervalMap<
345 OffsetInBitsTy, BaseAddress,
347 FragTraits>;
348 FragsInMemMap::Allocator IntervalMapAlloc;
349 using VarFragMap = DenseMap<unsigned, FragsInMemMap>;
350
351 /// IDs for memory location base addresses in maps. Use 0 to indicate that
352 /// there's no memory location.
357
358 struct FragMemLoc {
359 unsigned Var;
360 unsigned Base;
361 unsigned OffsetInBits;
362 unsigned SizeInBits;
363 DebugLoc DL;
364 };
366
367 /// BBInsertBeforeMap holds a description for the set of location defs to be
368 /// inserted after the analysis is complete. It is updated during the dataflow
369 /// and the entry for a block is CLEARED each time it is (re-)visited. After
370 /// the dataflow is complete, each block entry will contain the set of defs
371 /// calculated during the final (fixed-point) iteration.
373
374 static bool intervalMapsAreEqual(const FragsInMemMap &A,
375 const FragsInMemMap &B) {
376 auto AIt = A.begin(), AEnd = A.end();
377 auto BIt = B.begin(), BEnd = B.end();
378 for (; AIt != AEnd; ++AIt, ++BIt) {
379 if (BIt == BEnd)
380 return false; // B has fewer elements than A.
381 if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop())
382 return false; // Interval is different.
383 if (*AIt != *BIt)
384 return false; // Value at interval is different.
385 }
386 // AIt == AEnd. Check BIt is also now at end.
387 return BIt == BEnd;
388 }
389
390 static bool varFragMapsAreEqual(const VarFragMap &A, const VarFragMap &B) {
391 if (A.size() != B.size())
392 return false;
393 for (const auto &APair : A) {
394 auto BIt = B.find(APair.first);
395 if (BIt == B.end())
396 return false;
397 if (!intervalMapsAreEqual(APair.second, BIt->second))
398 return false;
399 }
400 return true;
401 }
402
403 /// Return a string for the value that \p BaseID represents.
404 std::string toString(unsigned BaseID) {
405 if (BaseID)
406 return Bases[BaseID].getVariableLocationOp(0)->getName().str();
407 else
408 return "None";
409 }
410
411 /// Format string describing an FragsInMemMap (IntervalMap) interval.
412 std::string toString(FragsInMemMap::const_iterator It, bool Newline = true) {
413 std::string String;
414 std::stringstream S(String);
415 if (It.valid()) {
416 S << "[" << It.start() << ", " << It.stop()
417 << "): " << toString(It.value());
418 } else {
419 S << "invalid iterator (end)";
420 }
421 if (Newline)
422 S << "\n";
423 return S.str();
424 };
425
426 FragsInMemMap meetFragments(const FragsInMemMap &A, const FragsInMemMap &B) {
427 FragsInMemMap Result(IntervalMapAlloc);
428 for (auto AIt = A.begin(), AEnd = A.end(); AIt != AEnd; ++AIt) {
429 LLVM_DEBUG(dbgs() << "a " << toString(AIt));
430 // This is basically copied from process() and inverted (process is
431 // performing something like a union whereas this is more of an
432 // intersect).
433
434 // There's no work to do if interval `a` overlaps no fragments in map `B`.
435 if (!B.overlaps(AIt.start(), AIt.stop()))
436 continue;
437
438 // Does StartBit intersect an existing fragment?
439 auto FirstOverlap = B.find(AIt.start());
440 assert(FirstOverlap != B.end());
441 bool IntersectStart = FirstOverlap.start() < AIt.start();
442 LLVM_DEBUG(dbgs() << "- FirstOverlap " << toString(FirstOverlap, false)
443 << ", IntersectStart: " << IntersectStart << "\n");
444
445 // Does EndBit intersect an existing fragment?
446 auto LastOverlap = B.find(AIt.stop());
447 bool IntersectEnd =
448 LastOverlap != B.end() && LastOverlap.start() < AIt.stop();
449 LLVM_DEBUG(dbgs() << "- LastOverlap " << toString(LastOverlap, false)
450 << ", IntersectEnd: " << IntersectEnd << "\n");
451
452 // Check if both ends of `a` intersect the same interval `b`.
453 if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
454 // Insert `a` (`a` is contained in `b`) if the values match.
455 // [ a ]
456 // [ - b - ]
457 // -
458 // [ r ]
459 LLVM_DEBUG(dbgs() << "- a is contained within "
460 << toString(FirstOverlap));
461 if (*AIt && *AIt == *FirstOverlap)
462 Result.insert(AIt.start(), AIt.stop(), *AIt);
463 } else {
464 // There's an overlap but `a` is not fully contained within
465 // `b`. Shorten any end-point intersections.
466 // [ - a - ]
467 // [ - b - ]
468 // -
469 // [ r ]
470 auto Next = FirstOverlap;
471 if (IntersectStart) {
472 LLVM_DEBUG(dbgs() << "- insert intersection of a and "
473 << toString(FirstOverlap));
474 if (*AIt && *AIt == *FirstOverlap)
475 Result.insert(AIt.start(), FirstOverlap.stop(), *AIt);
476 ++Next;
477 }
478 // [ - a - ]
479 // [ - b - ]
480 // -
481 // [ r ]
482 if (IntersectEnd) {
483 LLVM_DEBUG(dbgs() << "- insert intersection of a and "
484 << toString(LastOverlap));
485 if (*AIt && *AIt == *LastOverlap)
486 Result.insert(LastOverlap.start(), AIt.stop(), *AIt);
487 }
488
489 // Insert all intervals in map `B` that are contained within interval
490 // `a` where the values match.
491 // [ - - a - - ]
492 // [ b1 ] [ b2 ]
493 // -
494 // [ r1 ] [ r2 ]
495 while (Next != B.end() && Next.start() < AIt.stop() &&
496 Next.stop() <= AIt.stop()) {
498 << "- insert intersection of a and " << toString(Next));
499 if (*AIt && *AIt == *Next)
500 Result.insert(Next.start(), Next.stop(), *Next);
501 ++Next;
502 }
503 }
504 }
505 return Result;
506 }
507
  /// Meet \p A and \p B, storing the result in \p A.
  void meetVars(VarFragMap &A, const VarFragMap &B) {
    // Meet A and B.
    //
    // Result = meet(a, b) for a in A, b in B where Var(a) == Var(b)
    for (auto It = A.begin(), End = A.end(); It != End; ++It) {
      unsigned AVar = It->first;
      FragsInMemMap &AFrags = It->second;
      auto BIt = B.find(AVar);
      if (BIt == B.end()) {
        // NOTE: erasing mid-iteration relies on DenseMap::erase not
        // rehashing, so other iterators (including the loop's) stay valid.
        A.erase(It);
        continue; // Var has no bits defined in B.
      }
      LLVM_DEBUG(dbgs() << "meet fragment maps for "
                        << Aggregates[AVar].first->getName() << "\n");
      AFrags = meetFragments(AFrags, BIt->second);
    }
  }
526
  /// Compute the live-in set for \p BB as the meet (intersection-like) of the
  /// live-out sets of its already-visited predecessors and store it in LiveIn.
  /// Returns true if the stored live-in set changed (or was newly created),
  /// i.e. the block needs (re-)processing.
  bool meet(const BasicBlock &BB,
            const SmallPtrSet<BasicBlock *, 16> &Visited) {
    LLVM_DEBUG(dbgs() << "meet block info from preds of " << BB.getName()
                      << "\n");

    VarFragMap BBLiveIn;
    bool FirstMeet = true;
    // LiveIn locs for BB is the meet of the already-processed preds' LiveOut
    // locs.
    for (auto I = pred_begin(&BB), E = pred_end(&BB); I != E; I++) {
      // Ignore preds that haven't been processed yet. This is essentially the
      // same as initialising all variables to implicit top value (⊤) which is
      // the identity value for the meet operation.
      const BasicBlock *Pred = *I;
      if (!Visited.count(Pred))
        continue;

      auto PredLiveOut = LiveOut.find(Pred);
      assert(PredLiveOut != LiveOut.end());

      if (FirstMeet) {
        // First processed pred seeds the result directly (meet with ⊤).
        LLVM_DEBUG(dbgs() << "BBLiveIn = " << Pred->getName() << "\n");
        BBLiveIn = PredLiveOut->second;
        FirstMeet = false;
      } else {
        LLVM_DEBUG(dbgs() << "BBLiveIn = meet BBLiveIn, " << Pred->getName()
                          << "\n");
        meetVars(BBLiveIn, PredLiveOut->second);
      }

      // An empty set is ⊥ for the intersect-like meet operation. If we've
      // already got ⊥ there's no need to run the code - we know the result is
      // ⊥ since `meet(a, ⊥) = ⊥`.
      if (BBLiveIn.size() == 0)
        break;
    }

    auto CurrentLiveInEntry = LiveIn.find(&BB);
    // If there's no LiveIn entry for the block yet, add it.
    if (CurrentLiveInEntry == LiveIn.end()) {
      LLVM_DEBUG(dbgs() << "change=true (first) on meet on " << BB.getName()
                        << "\n");
      LiveIn[&BB] = std::move(BBLiveIn);
      return /*Changed=*/true;
    }

    // If the LiveIn set has changed (expensive check) update it and return
    // true.
    if (!varFragMapsAreEqual(BBLiveIn, CurrentLiveInEntry->second)) {
      LLVM_DEBUG(dbgs() << "change=true on meet on " << BB.getName() << "\n");
      CurrentLiveInEntry->second = std::move(BBLiveIn);
      return /*Changed=*/true;
    }

    LLVM_DEBUG(dbgs() << "change=false on meet on " << BB.getName() << "\n");
    return /*Changed=*/false;
  }
584
585 void insertMemLoc(BasicBlock &BB, Instruction &Before, unsigned Var,
586 unsigned StartBit, unsigned EndBit, unsigned Base,
587 DebugLoc DL) {
588 assert(StartBit < EndBit && "Cannot create fragment of size <= 0");
589 if (!Base)
590 return;
591 FragMemLoc Loc;
592 Loc.Var = Var;
593 Loc.OffsetInBits = StartBit;
594 Loc.SizeInBits = EndBit - StartBit;
595 assert(Base && "Expected a non-zero ID for Base address");
596 Loc.Base = Base;
597 Loc.DL = DL;
598 BBInsertBeforeMap[&BB][&Before].push_back(Loc);
599 LLVM_DEBUG(dbgs() << "Add mem def for " << Aggregates[Var].first->getName()
600 << " bits [" << StartBit << ", " << EndBit << ")\n");
601 }
602
  /// Inserts a new dbg def if the interval found when looking up \p StartBit
  /// in \p FragMap starts before \p StartBit or ends after \p EndBit (which
  /// indicates - assuming StartBit->EndBit has just been inserted - that the
  /// slice has been coalesced in the map).
  void coalesceFragments(BasicBlock &BB, Instruction &Before, unsigned Var,
                         unsigned StartBit, unsigned EndBit, unsigned Base,
                         DebugLoc DL, const FragsInMemMap &FragMap) {
    if (!CoalesceAdjacentFragments)
      return;
    // We've inserted the location into the map. The map will have coalesced
    // adjacent intervals (variable fragments) that describe the same memory
    // location. Use this knowledge to insert a debug location that describes
    // that coalesced fragment. This may eclipse other locs we've just
    // inserted. This is okay as redundant locs will be cleaned up later.
    // The lookup is guaranteed to hit since [StartBit, EndBit) was inserted
    // by the caller before calling this function.
    auto CoalescedFrag = FragMap.find(StartBit);
    // Bail if no coalescing has taken place.
    if (CoalescedFrag.start() == StartBit && CoalescedFrag.stop() == EndBit)
      return;

    LLVM_DEBUG(dbgs() << "- Insert loc for bits " << CoalescedFrag.start()
                      << " to " << CoalescedFrag.stop() << "\n");
    insertMemLoc(BB, Before, Var, CoalescedFrag.start(), CoalescedFrag.stop(),
                 Base, DL);
  }
627
  /// Apply the location def \p VarLoc (occurring just before \p Before in
  /// \p BB) to the live fragment map \p LiveSet, and queue memory-location
  /// defs to reinstate any memory-located bits this def disrupts. Only
  /// variables in VarsWithStackSlot with a known size are considered.
  void addDef(const VarLocInfo &VarLoc, Instruction &Before, BasicBlock &BB,
              VarFragMap &LiveSet) {
    DebugVariable DbgVar = FnVarLocs->getVariable(VarLoc.VariableID);
    if (skipVariable(DbgVar.getVariable()))
      return;
    // Don't bother doing anything for this variable if we know it's fully
    // promoted. We're only interested in variables that (sometimes) live on
    // the stack here.
    if (!VarsWithStackSlot->count(getAggregate(DbgVar)))
      return;
    unsigned Var = Aggregates.insert(
        DebugAggregate(DbgVar.getVariable(), VarLoc.DL.getInlinedAt()));

    // [StartBit: EndBit) are the bits affected by this def.
    const DIExpression *DIExpr = VarLoc.Expr;
    unsigned StartBit;
    unsigned EndBit;
    if (auto Frag = DIExpr->getFragmentInfo()) {
      StartBit = Frag->OffsetInBits;
      EndBit = StartBit + Frag->SizeInBits;
    } else {
      // No fragment info: the def covers the whole variable.
      assert(static_cast<bool>(DbgVar.getVariable()->getSizeInBits()));
      StartBit = 0;
      EndBit = *DbgVar.getVariable()->getSizeInBits();
    }

    // We will only fill fragments for simple memory-describing dbg.value
    // intrinsics. If the fragment offset is the same as the offset from the
    // base pointer, do The Thing, otherwise fall back to normal dbg.value
    // behaviour. AssignmentTrackingLowering has generated DIExpressions
    // written in terms of the base pointer.
    // TODO: Remove this condition since the fragment offset doesn't always
    // equal the offset from base pointer (e.g. for a SROA-split variable).
    const auto DerefOffsetInBytes = getDerefOffsetInBytes(DIExpr);
    const unsigned Base =
        DerefOffsetInBytes && *DerefOffsetInBytes * 8 == StartBit
            ? Bases.insert(VarLoc.Values)
            : 0;
    LLVM_DEBUG(dbgs() << "DEF " << DbgVar.getVariable()->getName() << " ["
                      << StartBit << ", " << EndBit << "): " << toString(Base)
                      << "\n");

    // First of all, any locs that use mem that are disrupted need reinstating.
    // Unfortunately, IntervalMap doesn't let us insert intervals that overlap
    // with existing intervals so this code involves a lot of fiddling around
    // with intervals to do that manually.
    auto FragIt = LiveSet.find(Var);

    // Check if the variable does not exist in the map.
    if (FragIt == LiveSet.end()) {
      // Add this variable to the BB map.
      auto P = LiveSet.try_emplace(Var, FragsInMemMap(IntervalMapAlloc));
      assert(P.second && "Var already in map?");
      // Add the interval to the fragment map.
      P.first->second.insert(StartBit, EndBit, Base);
      return;
    }
    // The variable has an entry in the map.

    FragsInMemMap &FragMap = FragIt->second;
    // First check the easy case: the new fragment `f` doesn't overlap with any
    // intervals.
    if (!FragMap.overlaps(StartBit, EndBit)) {
      LLVM_DEBUG(dbgs() << "- No overlaps\n");
      FragMap.insert(StartBit, EndBit, Base);
      coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
                        FragMap);
      return;
    }
    // There is at least one overlap.

    // Does StartBit intersect an existing fragment?
    auto FirstOverlap = FragMap.find(StartBit);
    assert(FirstOverlap != FragMap.end());
    bool IntersectStart = FirstOverlap.start() < StartBit;

    // Does EndBit intersect an existing fragment?
    auto LastOverlap = FragMap.find(EndBit);
    bool IntersectEnd = LastOverlap.valid() && LastOverlap.start() < EndBit;

    // Check if both ends of `f` intersect the same interval `i`.
    if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
      LLVM_DEBUG(dbgs() << "- Intersect single interval @ both ends\n");
      // Shorten `i` so that there's space to insert `f`.
      //   [ f ]
      // [ - i - ]
      // +
      // [ i ][ f ][ i ]

      // Save values for use after inserting a new interval.
      auto EndBitOfOverlap = FirstOverlap.stop();
      unsigned OverlapValue = FirstOverlap.value();

      // Shorten the overlapping interval.
      FirstOverlap.setStop(StartBit);
      insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
                   OverlapValue, VarLoc.DL);

      // Insert a new interval to represent the end part.
      FragMap.insert(EndBit, EndBitOfOverlap, OverlapValue);
      insertMemLoc(BB, Before, Var, EndBit, EndBitOfOverlap, OverlapValue,
                   VarLoc.DL);

      // Insert the new (middle) fragment now there is space.
      FragMap.insert(StartBit, EndBit, Base);
    } else {
      // There's an overlap but `f` may not be fully contained within
      // `i`. Shorten any end-point intersections so that we can then
      // insert `f`.
      //      [ - f - ]
      // [ - i - ]
      // |   |
      // [ i ]
      // Shorten any end-point intersections.
      if (IntersectStart) {
        LLVM_DEBUG(dbgs() << "- Intersect interval at start\n");
        // Split off at the intersection.
        FirstOverlap.setStop(StartBit);
        insertMemLoc(BB, Before, Var, FirstOverlap.start(), StartBit,
                     *FirstOverlap, VarLoc.DL);
      }
      // [ - f - ]
      //      [ - i - ]
      //          |   |
      //          [ i ]
      if (IntersectEnd) {
        LLVM_DEBUG(dbgs() << "- Intersect interval at end\n");
        // Split off at the intersection.
        LastOverlap.setStart(EndBit);
        insertMemLoc(BB, Before, Var, EndBit, LastOverlap.stop(), *LastOverlap,
                     VarLoc.DL);
      }

      LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n");
      // FirstOverlap and LastOverlap have been shortened such that they're
      // no longer overlapping with [StartBit, EndBit). Delete any overlaps
      // that remain (these will be fully contained within `f`).
      //      [ - f - ]         }
      // [ - i - ]              } Intersection shortening that has happened
      // |   |                  } above.
      // [ i ]                  }
      // -----------------
      //        [i2 ]           } Intervals fully contained within `f` get
      // -----------------      } erased.
      //      [ - f - ][ i ]    } Completed insertion.
      auto It = FirstOverlap;
      if (IntersectStart)
        ++It; // IntersectStart: first overlap has been shortened.
      while (It.valid() && It.start() >= StartBit && It.stop() <= EndBit) {
        LLVM_DEBUG(dbgs() << "- Erase " << toString(It));
        It.erase(); // This increments It after removing the interval.
      }
      // We've dealt with all the overlaps now!
      assert(!FragMap.overlaps(StartBit, EndBit));
      LLVM_DEBUG(dbgs() << "- Insert DEF into now-empty space\n");
      FragMap.insert(StartBit, EndBit, Base);
    }

    coalesceFragments(BB, Before, Var, StartBit, EndBit, Base, VarLoc.DL,
                      FragMap);
  }
789
790 bool skipVariable(const DILocalVariable *V) { return !V->getSizeInBits(); }
791
792 void process(BasicBlock &BB, VarFragMap &LiveSet) {
793 BBInsertBeforeMap[&BB].clear();
794 for (auto &I : BB) {
795 if (const auto *Locs = FnVarLocs->getWedge(&I)) {
796 for (const VarLocInfo &Loc : *Locs) {
797 addDef(Loc, I, *I.getParent(), LiveSet);
798 }
799 }
800 }
801 }
802
803public:
  /// \p VarsWithStackSlot identifies the variables the analysis will consider
  /// (those that sometimes live on the stack); the pointed-to set must outlive
  /// this object. \p CoalesceAdjacentFragments enables merging adjacent
  /// same-location fragments into single loc defs (see coalesceFragments).
  MemLocFragmentFill(Function &Fn,
                     const DenseSet<DebugAggregate> *VarsWithStackSlot,
                     bool CoalesceAdjacentFragments)
      : Fn(Fn), VarsWithStackSlot(VarsWithStackSlot),
        CoalesceAdjacentFragments(CoalesceAdjacentFragments) {}
809
810 /// Add variable locations to \p FnVarLocs so that any bits of a variable
811 /// with a memory location have that location explicitly reinstated at each
812 /// subsequent variable location definition that that doesn't overwrite those
813 /// bits. i.e. after a variable location def, insert new defs for the memory
814 /// location with fragments for the difference of "all bits currently in
815 /// memory" and "the fragment of the second def". e.g.
816 ///
817 /// Before:
818 ///
819 /// var x bits 0 to 63: value in memory
820 /// more instructions
821 /// var x bits 0 to 31: value is %0
822 ///
823 /// After:
824 ///
825 /// var x bits 0 to 63: value in memory
826 /// more instructions
827 /// var x bits 0 to 31: value is %0
828 /// var x bits 32 to 61: value in memory ; <-- new loc def
829 ///
830 void run(FunctionVarLocsBuilder *FnVarLocs) {
832 return;
833
834 this->FnVarLocs = FnVarLocs;
835
836 // Prepare for traversal.
837 //
839 std::priority_queue<unsigned int, std::vector<unsigned int>,
840 std::greater<unsigned int>>
841 Worklist;
842 std::priority_queue<unsigned int, std::vector<unsigned int>,
843 std::greater<unsigned int>>
844 Pending;
847 { // Init OrderToBB and BBToOrder.
848 unsigned int RPONumber = 0;
849 for (auto RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) {
850 OrderToBB[RPONumber] = *RI;
851 BBToOrder[*RI] = RPONumber;
852 Worklist.push(RPONumber);
853 ++RPONumber;
854 }
855 LiveIn.init(RPONumber);
856 LiveOut.init(RPONumber);
857 }
858
859 // Perform the traversal.
860 //
861 // This is a standard "intersect of predecessor outs" dataflow problem. To
862 // solve it, we perform meet() and process() using the two worklist method
863 // until the LiveIn data for each block becomes unchanging.
864 //
865 // This dataflow is essentially working on maps of sets and at each meet we
866 // intersect the maps and the mapped sets. So, initialized live-in maps
867 // monotonically decrease in value throughout the dataflow.
869 while (!Worklist.empty() || !Pending.empty()) {
870 // We track what is on the pending worklist to avoid inserting the same
871 // thing twice. We could avoid this with a custom priority queue, but
872 // this is probably not worth it.
874 LLVM_DEBUG(dbgs() << "Processing Worklist\n");
875 while (!Worklist.empty()) {
876 BasicBlock *BB = OrderToBB[Worklist.top()];
877 LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
878 Worklist.pop();
879 bool InChanged = meet(*BB, Visited);
880 // Always consider LiveIn changed on the first visit.
881 InChanged |= Visited.insert(BB).second;
882 if (InChanged) {
884 << BB->getName() << " has new InLocs, process it\n");
885 // Mutate a copy of LiveIn while processing BB. Once we've processed
886 // the terminator LiveSet is the LiveOut set for BB.
887 // This is an expensive copy!
888 VarFragMap LiveSet = LiveIn[BB];
889
890 // Process the instructions in the block.
891 process(*BB, LiveSet);
892
893 // Relatively expensive check: has anything changed in LiveOut for BB?
894 if (!varFragMapsAreEqual(LiveOut[BB], LiveSet)) {
895 LLVM_DEBUG(dbgs() << BB->getName()
896 << " has new OutLocs, add succs to worklist: [ ");
897 LiveOut[BB] = std::move(LiveSet);
898 for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
899 if (OnPending.insert(*I).second) {
900 LLVM_DEBUG(dbgs() << I->getName() << " ");
901 Pending.push(BBToOrder[*I]);
902 }
903 }
904 LLVM_DEBUG(dbgs() << "]\n");
905 }
906 }
907 }
908 Worklist.swap(Pending);
909 // At this point, pending must be empty, since it was just the empty
910 // worklist
911 assert(Pending.empty() && "Pending should be empty");
912 }
913
914 // Insert new location defs.
915 for (auto &Pair : BBInsertBeforeMap) {
916 InsertMap &Map = Pair.second;
917 for (auto &Pair : Map) {
918 Instruction *InsertBefore = Pair.first;
919 assert(InsertBefore && "should never be null");
920 auto FragMemLocs = Pair.second;
921 auto &Ctx = Fn.getContext();
922
923 for (auto &FragMemLoc : FragMemLocs) {
924 DIExpression *Expr = DIExpression::get(Ctx, std::nullopt);
925 if (FragMemLoc.SizeInBits !=
926 *Aggregates[FragMemLoc.Var].first->getSizeInBits())
928 Expr, FragMemLoc.OffsetInBits, FragMemLoc.SizeInBits);
930 FragMemLoc.OffsetInBits / 8);
931 DebugVariable Var(Aggregates[FragMemLoc.Var].first, Expr,
932 FragMemLoc.DL.getInlinedAt());
933 FnVarLocs->addVarLoc(InsertBefore, Var, Expr, FragMemLoc.DL,
934 Bases[FragMemLoc.Base]);
935 }
936 }
937 }
938 }
939};
940
941/// AssignmentTrackingLowering encapsulates a dataflow analysis over a function
942/// that interprets assignment tracking debug info metadata and stores in IR to
943/// create a map of variable locations.
class AssignmentTrackingLowering {
public:
  /// The kind of location in use for a variable, where Mem is the stack home,
  /// Val is an SSA value or const, and None means that there is not one single
  /// kind (either because there are multiple or because there is none; it may
  /// prove useful to split this into two values in the future).
  ///
  /// LocKind is a join-semilattice with the partial order:
  /// None > Mem, Val
  ///
  /// i.e.
  /// join(Mem, Mem) = Mem
  /// join(Val, Val) = Val
  /// join(Mem, Val) = None
  /// join(None, Mem) = None
  /// join(None, Val) = None
  /// join(None, None) = None
  ///
  /// Note: the order is not `None > Val > Mem` because we're using DIAssignID
  /// to name assignments and are not tracking the actual stored values.
  /// Therefore currently there's no way to ensure that Mem values and Val
  /// values are the same. This could be a future extension, though it's not
  /// clear that many additional locations would be recovered that way in
  /// practice as the likelihood of this situation arising naturally seems
  /// incredibly low.
  enum class LocKind { Mem, Val, None };

  /// An abstraction of the assignment of a value to a variable or memory
  /// location.
  ///
  /// An Assignment is Known or NoneOrPhi. A Known Assignment means we have a
  /// DIAssignID ptr that represents it. NoneOrPhi means that we don't (or
  /// can't) know the ID of the last assignment that took place.
  ///
  /// The Status of the Assignment (Known or NoneOrPhi) is another
  /// join-semilattice. The partial order is:
  /// NoneOrPhi > Known {id_0, id_1, ...id_N}
  ///
  /// i.e. for all values x and y where x != y:
  /// join(x, x) = x
  /// join(x, y) = NoneOrPhi
  struct Assignment {
    enum S { Known, NoneOrPhi } Status;
    /// ID of the assignment. nullptr if Status is not Known.
    DIAssignID *ID;
    /// The dbg.assign that marks this dbg-def. Mem-defs don't use this field.
    /// May be nullptr.

    bool isSameSourceAssignment(const Assignment &Other) const {
      // Don't include Source in the equality check. Assignments are
      // defined by their ID, not debug intrinsic(s).
      return std::tie(Status, ID) == std::tie(Other.Status, Other.ID);
    }
    void dump(raw_ostream &OS) {
      static const char *LUT[] = {"Known", "NoneOrPhi"};
      OS << LUT[Status] << "(id=";
      if (ID)
        OS << ID;
      else
        OS << "null";
      OS << ", s=";
      if (Source)
        OS << *Source;
      else
        OS << "null";
      OS << ")";
    }

    static Assignment make(DIAssignID *ID, DbgAssignIntrinsic *Source) {
      return Assignment(Known, ID, Source);
    }
    static Assignment makeFromMemDef(DIAssignID *ID) {
      return Assignment(Known, ID, nullptr);
    }
    static Assignment makeNoneOrPhi() {
      return Assignment(NoneOrPhi, nullptr, nullptr);
    }
    // Again, need a Top value?
    Assignment()
        : Status(NoneOrPhi), ID(nullptr), Source(nullptr) {
    } // Can we delete this?
    Assignment(S Status, DIAssignID *ID, DbgAssignIntrinsic *Source)
        : Status(Status), ID(ID), Source(Source) {
      // If the Status is Known then we expect there to be an assignment ID.
      assert(Status == NoneOrPhi || ID);
    }
  };

  using AssignmentMap = SmallVector<Assignment>;
  using UntaggedStoreAssignmentMap =
      DenseMap<const Instruction *,

private:
  /// The highest numbered VariableID for partially promoted variables plus 1,
  /// the values for which start at 1.
  unsigned TrackedVariablesVectorSize = 0;
  /// Map a variable to the set of variables that it fully contains.
  OverlapMap VarContains;
  /// Map untagged stores to the variable fragments they assign to. Used by
  /// processUntaggedInstruction.
  UntaggedStoreAssignmentMap UntaggedStoreVars;

  // Machinery to defer inserting dbg.values.
  InsertMap InsertBeforeMap;
  /// Clear the location definitions currently cached for insertion after \p
  /// After.
  void resetInsertionPoint(Instruction &After);
  void emitDbgValue(LocKind Kind, const DbgVariableIntrinsic *Source,

  // True iff A and B agree (by DIAssignID) for every variable set in Mask.
  static bool mapsAreEqual(const BitVector &Mask, const AssignmentMap &A,
                           const AssignmentMap &B) {
    return llvm::all_of(Mask.set_bits(), [&](unsigned VarID) {
      return A[VarID].isSameSourceAssignment(B[VarID]);
    });
  }

  /// Represents the stack and debug assignments in a block. Used to describe
  /// the live-in and live-out values for blocks, as well as the "current"
  /// value as we process each instruction in a block.
  struct BlockInfo {
    /// The set of variables (VariableID) being tracked in this block.
    BitVector VariableIDsInBlock;
    /// Dominating assignment to memory for each variable, indexed by
    /// VariableID.
    AssignmentMap StackHomeValue;
    /// Dominating assignment to each variable, indexed by VariableID.
    AssignmentMap DebugValue;
    /// Location kind for each variable. LiveLoc indicates whether the
    /// dominating assignment in StackHomeValue (LocKind::Mem), DebugValue
    /// (LocKind::Val), or neither (LocKind::None) is valid, in that order of
    /// preference. This cannot be derived by inspecting DebugValue and
    /// StackHomeValue due to the fact that there's no distinction in
    /// Assignment (the class) between whether an assignment is unknown or a
    /// merge of multiple assignments (both are Status::NoneOrPhi). In other
    /// words, the memory location may well be valid while both DebugValue and
    /// StackHomeValue contain Assignments that have a Status of NoneOrPhi.
    /// Indexed by VariableID.
    LocMap LiveLoc;

  public:
    enum AssignmentKind { Stack, Debug };
    const AssignmentMap &getAssignmentMap(AssignmentKind Kind) const {
      switch (Kind) {
      case Stack:
        return StackHomeValue;
      case Debug:
        return DebugValue;
      }
      llvm_unreachable("Unknown AssignmentKind");
    }
    AssignmentMap &getAssignmentMap(AssignmentKind Kind) {
      // Reuse the const overload; casting away const is safe because *this
      // is in fact non-const here.
      return const_cast<AssignmentMap &>(
          const_cast<const BlockInfo *>(this)->getAssignmentMap(Kind));
    }

    bool isVariableTracked(VariableID Var) const {
      return VariableIDsInBlock[static_cast<unsigned>(Var)];
    }

    const Assignment &getAssignment(AssignmentKind Kind, VariableID Var) const {
      assert(isVariableTracked(Var) && "Var not tracked in block");
      return getAssignmentMap(Kind)[static_cast<unsigned>(Var)];
    }

    LocKind getLocKind(VariableID Var) const {
      assert(isVariableTracked(Var) && "Var not tracked in block");
      return LiveLoc[static_cast<unsigned>(Var)];
    }

    /// Set LocKind for \p Var only: does not set LocKind for VariableIDs of
    /// fragments contained within \p Var.
    void setLocKind(VariableID Var, LocKind K) {
      VariableIDsInBlock.set(static_cast<unsigned>(Var));
      LiveLoc[static_cast<unsigned>(Var)] = K;
    }

    /// Set the assignment in the \p Kind assignment map for \p Var only: does
    /// not set the assignment for VariableIDs of fragments contained within
    /// \p Var.
    void setAssignment(AssignmentKind Kind, VariableID Var,
                       const Assignment &AV) {
      VariableIDsInBlock.set(static_cast<unsigned>(Var));
      getAssignmentMap(Kind)[static_cast<unsigned>(Var)] = AV;
    }

    /// Return true if there is an assignment matching \p AV in the \p Kind
    /// assignment map. Does consider assignments for VariableIDs of fragments
    /// contained within \p Var.
    bool hasAssignment(AssignmentKind Kind, VariableID Var,
                       const Assignment &AV) const {
      if (!isVariableTracked(Var))
        return false;
      return AV.isSameSourceAssignment(getAssignment(Kind, Var));
    }

    /// Compare every element in each map to determine structural equality
    /// (slow).
    bool operator==(const BlockInfo &Other) const {
      return VariableIDsInBlock == Other.VariableIDsInBlock &&
             LiveLoc == Other.LiveLoc &&
             mapsAreEqual(VariableIDsInBlock, StackHomeValue,
                          Other.StackHomeValue) &&
             mapsAreEqual(VariableIDsInBlock, DebugValue, Other.DebugValue);
    }
    bool operator!=(const BlockInfo &Other) const { return !(*this == Other); }
    bool isValid() {
      return LiveLoc.size() == DebugValue.size() &&
             LiveLoc.size() == StackHomeValue.size();
    }

    /// Clear everything and initialise with ⊤-values for all variables.
    void init(int NumVars) {
      StackHomeValue.clear();
      DebugValue.clear();
      LiveLoc.clear();
      VariableIDsInBlock = BitVector(NumVars);
      StackHomeValue.insert(StackHomeValue.begin(), NumVars,
                            Assignment::makeNoneOrPhi());
      DebugValue.insert(DebugValue.begin(), NumVars,
                        Assignment::makeNoneOrPhi());
      LiveLoc.insert(LiveLoc.begin(), NumVars, LocKind::None);
    }

    /// Helper for join.
    template <typename ElmtType, typename FnInputType>
    static void joinElmt(int Index, SmallVector<ElmtType> &Target,
                         const SmallVector<ElmtType> &A,
                         const SmallVector<ElmtType> &B,
                         ElmtType (*Fn)(FnInputType, FnInputType)) {
      Target[Index] = Fn(A[Index], B[Index]);
    }

    /// See comment for AssignmentTrackingLowering::joinBlockInfo.
    static BlockInfo join(const BlockInfo &A, const BlockInfo &B, int NumVars) {
      // Join A and B.
      //
      // Intersect = join(a, b) for a in A, b in B where Var(a) == Var(b)
      // Difference = join(x, ⊤) for x where Var(x) is in A xor B
      // Join = Intersect ∪ Difference
      //
      // This is achieved by performing a join on elements from A and B with
      // variables common to both A and B (join elements indexed by var
      // intersect), then adding ⊤-value elements for vars in A xor B. The
      // latter part is equivalent to performing join on elements with variables
      // in A xor B with the ⊤-value for the map element since join(x, ⊤) = ⊤.
      // BlockInfo::init initializes all variable entries to the ⊤ value so we
      // don't need to explicitly perform that step as Join.VariableIDsInBlock
      // is set to the union of the variables in A and B at the end of this
      // function.
      BlockInfo Join;
      Join.init(NumVars);

      BitVector Intersect = A.VariableIDsInBlock;
      Intersect &= B.VariableIDsInBlock;

      for (auto VarID : Intersect.set_bits()) {
        joinElmt(VarID, Join.LiveLoc, A.LiveLoc, B.LiveLoc, joinKind);
        joinElmt(VarID, Join.DebugValue, A.DebugValue, B.DebugValue,
                 joinAssignment);
        joinElmt(VarID, Join.StackHomeValue, A.StackHomeValue, B.StackHomeValue,
                 joinAssignment);
      }

      Join.VariableIDsInBlock = A.VariableIDsInBlock;
      Join.VariableIDsInBlock |= B.VariableIDsInBlock;
      assert(Join.isValid());
      return Join;
    }
  };

  Function &Fn;
  const DataLayout &Layout;
  const DenseSet<DebugAggregate> *VarsWithStackSlot;
  FunctionVarLocsBuilder *FnVarLocs;

  /// Helper for process methods to track variables touched each frame.
  DenseSet<VariableID> VarsTouchedThisFrame;

  /// The set of variables that sometimes are not located in their stack home.
  DenseSet<DebugAggregate> NotAlwaysStackHomed;

  VariableID getVariableID(const DebugVariable &Var) {
    return static_cast<VariableID>(FnVarLocs->insertVariable(Var));
  }

  /// Join the LiveOut values of preds that are contained in \p Visited into
  /// LiveIn[BB]. Return True if LiveIn[BB] has changed as a result. LiveIn[BB]
  /// values monotonically increase. See the @link joinMethods join methods
  /// @endlink documentation for more info.
  bool join(const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited);
  ///@name joinMethods
  /// Functions that implement `join` (the least upper bound) for the
  /// join-semilattice types used in the dataflow. There is an explicit bottom
  /// value (⊥) for some types and an explicit top value (⊤) for all types.
  /// By definition:
  ///
  /// Join(A, B) >= A && Join(A, B) >= B
  /// Join(A, ⊥) = A
  /// Join(A, ⊤) = ⊤
  ///
  /// These invariants are important for monotonicity.
  ///
  /// For the map-type functions, all unmapped keys in an empty map are
  /// associated with a bottom value (⊥). This represents their values being
  /// unknown. Unmapped keys in non-empty maps (joining two maps with a key
  /// only present in one) represents either a variable going out of scope or
  /// dropped debug info. It is assumed the key is associated with a top value
  /// (⊤) in this case (unknown location / assignment).
  ///@{
  static LocKind joinKind(LocKind A, LocKind B);
  static Assignment joinAssignment(const Assignment &A, const Assignment &B);
  BlockInfo joinBlockInfo(const BlockInfo &A, const BlockInfo &B);
  ///@}

  /// Process the instructions in \p BB updating \p LiveSet along the way. \p
  /// LiveSet must be initialized with the current live-in locations before
  /// calling this.
  void process(BasicBlock &BB, BlockInfo *LiveSet);
  ///@name processMethods
  /// Methods to process instructions in order to update the LiveSet (current
  /// location information).
  ///@{
  void processNonDbgInstruction(Instruction &I, BlockInfo *LiveSet);
  void processDbgInstruction(DbgInfoIntrinsic &I, BlockInfo *LiveSet);
  /// Update \p LiveSet after encountering an instruction with a DIAssignID
  /// attachment, \p I.
  void processTaggedInstruction(Instruction &I, BlockInfo *LiveSet);
  /// Update \p LiveSet after encountering an instruction without a DIAssignID
  /// attachment, \p I.
  void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet);
  void processDbgAssign(DbgAssignIntrinsic &DAI, BlockInfo *LiveSet);
  void processDbgValue(DbgValueInst &DVI, BlockInfo *LiveSet);
  /// Add an assignment to memory for the variable \p Var.
  void addMemDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV);
  /// Add an assignment to the variable \p Var.
  void addDbgDef(BlockInfo *LiveSet, VariableID Var, const Assignment &AV);
  ///@}

  /// Set the LocKind for \p Var.
  void setLocKind(BlockInfo *LiveSet, VariableID Var, LocKind K);
  /// Get the live LocKind for a \p Var. Requires addMemDef or addDbgDef to
  /// have been called for \p Var first.
  LocKind getLocKind(BlockInfo *LiveSet, VariableID Var);
  /// Return true if \p Var has an assignment in \p M matching \p AV.
  bool hasVarWithAssignment(BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind,
                            VariableID Var, const Assignment &AV);
  /// Return the set of VariableIDs corresponding the fragments contained fully
  /// within the variable/fragment \p Var.
  ArrayRef<VariableID> getContainedFragments(VariableID Var) const;

  /// Mark \p Var as having been touched this frame. Note, this applies only
  /// to the exact fragment \p Var and not to any fragments contained within.
  void touchFragment(VariableID Var);

  /// Emit info for variables that are fully promoted.
  bool emitPromotedVarLocs(FunctionVarLocsBuilder *FnVarLocs);

public:
  AssignmentTrackingLowering(Function &Fn, const DataLayout &Layout,
                             const DenseSet<DebugAggregate> *VarsWithStackSlot)
      : Fn(Fn), Layout(Layout), VarsWithStackSlot(VarsWithStackSlot) {}
  /// Run the analysis, adding variable location info to \p FnVarLocs. Returns
  /// true if any variable locations have been added to FnVarLocs.
  bool run(FunctionVarLocsBuilder *FnVarLocs);
};
1317} // namespace
1318
AssignmentTrackingLowering::getContainedFragments(VariableID Var) const {
  // VarContains only has entries for variables that fully contain other
  // fragments; absence simply means there is nothing to propagate to.
  auto R = VarContains.find(Var);
  if (R == VarContains.end())
    return std::nullopt;
  return R->second;
}
1326
void AssignmentTrackingLowering::touchFragment(VariableID Var) {
  // Record that exactly this fragment was defined in the current frame;
  // fragments contained within \p Var are recorded by their own updates.
  VarsTouchedThisFrame.insert(Var);
}
1330
1331void AssignmentTrackingLowering::setLocKind(BlockInfo *LiveSet, VariableID Var,
1332 LocKind K) {
1333 auto SetKind = [this](BlockInfo *LiveSet, VariableID Var, LocKind K) {
1334 LiveSet->setLocKind(Var, K);
1335 touchFragment(Var);
1336 };
1337 SetKind(LiveSet, Var, K);
1338
1339 // Update the LocKind for all fragments contained within Var.
1340 for (VariableID Frag : getContainedFragments(Var))
1341 SetKind(LiveSet, Frag, K);
1342}
1343
AssignmentTrackingLowering::LocKind
AssignmentTrackingLowering::getLocKind(BlockInfo *LiveSet, VariableID Var) {
  // Thin forwarder; BlockInfo::getLocKind asserts \p Var is tracked.
  return LiveSet->getLocKind(Var);
}
1348
1349void AssignmentTrackingLowering::addMemDef(BlockInfo *LiveSet, VariableID Var,
1350 const Assignment &AV) {
1351 LiveSet->setAssignment(BlockInfo::Stack, Var, AV);
1352
1353 // Use this assigment for all fragments contained within Var, but do not
1354 // provide a Source because we cannot convert Var's value to a value for the
1355 // fragment.
1356 Assignment FragAV = AV;
1357 FragAV.Source = nullptr;
1358 for (VariableID Frag : getContainedFragments(Var))
1359 LiveSet->setAssignment(BlockInfo::Stack, Frag, FragAV);
1360}
1361
1362void AssignmentTrackingLowering::addDbgDef(BlockInfo *LiveSet, VariableID Var,
1363 const Assignment &AV) {
1364 LiveSet->setAssignment(BlockInfo::Debug, Var, AV);
1365
1366 // Use this assigment for all fragments contained within Var, but do not
1367 // provide a Source because we cannot convert Var's value to a value for the
1368 // fragment.
1369 Assignment FragAV = AV;
1370 FragAV.Source = nullptr;
1371 for (VariableID Frag : getContainedFragments(Var))
1372 LiveSet->setAssignment(BlockInfo::Debug, Frag, FragAV);
1373}
1374
  // The instruction is expected to carry a !DIAssignID attachment; cast (not
  // dyn_cast) asserts that it is present and of the right node type.
  return cast<DIAssignID>(I.getMetadata(LLVMContext::MD_DIAssignID));
}
1378
  // Extract the DIAssignID node that names the marker's assignment.
  return cast<DIAssignID>(DAI.getAssignID());
}
1382
1383/// Return true if \p Var has an assignment in \p M matching \p AV.
1384bool AssignmentTrackingLowering::hasVarWithAssignment(
1385 BlockInfo *LiveSet, BlockInfo::AssignmentKind Kind, VariableID Var,
1386 const Assignment &AV) {
1387 if (!LiveSet->hasAssignment(Kind, Var, AV))
1388 return false;
1389
1390 // Check all the frags contained within Var as these will have all been
1391 // mapped to AV at the last store to Var.
1392 for (VariableID Frag : getContainedFragments(Var))
1393 if (!LiveSet->hasAssignment(Kind, Frag, AV))
1394 return false;
1395 return true;
1396}
1397
1398#ifndef NDEBUG
1399const char *locStr(AssignmentTrackingLowering::LocKind Loc) {
1400 using LocKind = AssignmentTrackingLowering::LocKind;
1401 switch (Loc) {
1402 case LocKind::Val:
1403 return "Val";
1404 case LocKind::Mem:
1405 return "Mem";
1406 case LocKind::None:
1407 return "None";
1408 };
1409 llvm_unreachable("unknown LocKind");
1410}
1411#endif
1412
/// Build and defer a location definition (inserted after \p After) for
/// \p Source's variable: the memory location, the value, or an empty
/// location, according to \p Kind. Kind may be locally downgraded from Mem
/// to Val when the dbg.assign's address has been killed.
void AssignmentTrackingLowering::emitDbgValue(
    AssignmentTrackingLowering::LocKind Kind,
    const DbgVariableIntrinsic *Source, Instruction *After) {

  DILocation *DL = Source->getDebugLoc();
  auto Emit = [this, Source, After, DL](Metadata *Val, DIExpression *Expr) {
    assert(Expr);
    if (!Val)
          PoisonValue::get(Type::getInt1Ty(Source->getContext())));

    // Find a suitable insert point.
    Instruction *InsertBefore = After->getNextNode();
    assert(InsertBefore && "Shouldn't be inserting after a terminator");

    VariableID Var = getVariableID(DebugVariable(Source));
    VarLocInfo VarLoc;
    VarLoc.VariableID = static_cast<VariableID>(Var);
    VarLoc.Expr = Expr;
    VarLoc.Values = RawLocationWrapper(Val);
    VarLoc.DL = DL;
    // Insert it into the map for later.
    InsertBeforeMap[InsertBefore].push_back(VarLoc);
  };

  // NOTE: This block can mutate Kind.
  if (Kind == LocKind::Mem) {
    const auto *DAI = cast<DbgAssignIntrinsic>(Source);
    // Check the address hasn't been dropped (e.g. the debug uses may not have
    // been replaced before deleting a Value).
    if (DAI->isKillAddress()) {
      // The address isn't valid so treat this as a non-memory def.
      Kind = LocKind::Val;
    } else {
      Value *Val = DAI->getAddress();
      DIExpression *Expr = DAI->getAddressExpression();
      assert(!Expr->getFragmentInfo() &&
             "fragment info should be stored in value-expression only");
      // Copy the fragment info over from the value-expression to the new
      // DIExpression.
      if (auto OptFragInfo = Source->getExpression()->getFragmentInfo()) {
        auto FragInfo = *OptFragInfo;
            Expr, FragInfo.OffsetInBits, FragInfo.SizeInBits);
      }
      // The address-expression has an implicit deref, add it now.
      std::tie(Val, Expr) =
          walkToAllocaAndPrependOffsetDeref(Layout, Val, Expr);
      Emit(ValueAsMetadata::get(Val), Expr);
      return;
    }
  }

  if (Kind == LocKind::Val) {
    Emit(Source->getRawLocation(), Source->getExpression());
    return;
  }

  if (Kind == LocKind::None) {
    // A null value terminates any previously open location range.
    Emit(nullptr, Source->getExpression());
    return;
  }
}
1476
1477void AssignmentTrackingLowering::processNonDbgInstruction(
1478 Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1479 if (I.hasMetadata(LLVMContext::MD_DIAssignID))
1480 processTaggedInstruction(I, LiveSet);
1481 else
1482 processUntaggedInstruction(I, LiveSet);
1483}
1484
void AssignmentTrackingLowering::processUntaggedInstruction(
    Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
  // Interpret stack stores that are not tagged as an assignment in memory for
  // the variables associated with that address. These stores may not be tagged
  // because a) the store cannot be represented using dbg.assigns (non-const
  // length or offset) or b) the tag was accidentally dropped during
  // optimisations. For these stores we fall back to assuming that the stack
  // home is a valid location for the variables. The benefit is that this
  // prevents us missing an assignment and therefore incorrectly maintaining
  // earlier location definitions, and in many cases it should be a reasonable
  // assumption. However, this will occasionally lead to slight
  // inaccuracies. The value of a hoisted untagged store will be visible
  // "early", for example.
  assert(!I.hasMetadata(LLVMContext::MD_DIAssignID));
  auto It = UntaggedStoreVars.find(&I);
  if (It == UntaggedStoreVars.end())
    return; // No variables associated with the store destination.

  LLVM_DEBUG(dbgs() << "processUntaggedInstruction on UNTAGGED INST " << I
                    << "\n");
  // Iterate over the variables that this store affects, add a NoneOrPhi dbg
  // and mem def, set lockind to Mem, and emit a location def for each.
  for (auto [Var, Info] : It->second) {
    // This instruction is treated as both a debug and memory assignment,
    // meaning the memory location should be used. We don't have an assignment
    // ID though so use Assignment::makeNoneOrPhi() to create an imaginary one.
    addMemDef(LiveSet, Var, Assignment::makeNoneOrPhi());
    addDbgDef(LiveSet, Var, Assignment::makeNoneOrPhi());
    setLocKind(LiveSet, Var, LocKind::Mem);
    LLVM_DEBUG(dbgs() << " setting Stack LocKind to: " << locStr(LocKind::Mem)
                      << "\n");
    // Build the dbg location def to insert.
    //
    // DIExpression: Add fragment and offset.
    DebugVariable V = FnVarLocs->getVariable(Var);
    DIExpression *DIE = DIExpression::get(I.getContext(), std::nullopt);
    if (auto Frag = V.getFragment()) {
      auto R = DIExpression::createFragmentExpression(DIE, Frag->OffsetInBits,
                                                      Frag->SizeInBits);
      assert(R && "unexpected createFragmentExpression failure");
      DIE = *R;
    }
    if (Info.OffsetInBits)
      Ops = {dwarf::DW_OP_plus_uconst, Info.OffsetInBits / 8};
    Ops.push_back(dwarf::DW_OP_deref);
    DIE = DIExpression::prependOpcodes(DIE, Ops, /*StackValue=*/false,
                                       /*EntryValue=*/false);
    // Find a suitable insert point.
    Instruction *InsertBefore = I.getNextNode();
    assert(InsertBefore && "Shouldn't be inserting after a terminator");

    // Get DILocation for this unrecorded assignment.
    DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt());
    const DILocation *DILoc = DILocation::get(
        Fn.getContext(), 0, 0, V.getVariable()->getScope(), InlinedAt);

    VarLocInfo VarLoc;
    VarLoc.VariableID = static_cast<VariableID>(Var);
    VarLoc.Expr = DIE;
    VarLoc.Values = RawLocationWrapper(
        ValueAsMetadata::get(const_cast<AllocaInst *>(Info.Base)));
    VarLoc.DL = DILoc;
    // 3. Insert it into the map for later.
    InsertBeforeMap[InsertBefore].push_back(VarLoc);
  }
}
1552
/// Transfer function for an instruction tagged with a DIAssignID attachment:
/// the store becomes the dominating memory assignment for every variable
/// whose dbg.assign is linked to the tag, and an appropriate location def
/// (Mem/Val/None) is emitted for each.
void AssignmentTrackingLowering::processTaggedInstruction(
    Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
  auto Linked = at::getAssignmentMarkers(&I);
  // No dbg.assign intrinsics linked.
  // FIXME: All vars that have a stack slot this store modifies that don't have
  // a dbg.assign linked to it should probably treat this like an untagged
  // store.
  if (Linked.empty())
    return;

  LLVM_DEBUG(dbgs() << "processTaggedInstruction on " << I << "\n");
  for (DbgAssignIntrinsic *DAI : Linked) {
    VariableID Var = getVariableID(DebugVariable(DAI));
    // Something has gone wrong if VarsWithStackSlot doesn't contain a variable
    // that is linked to a store.
    assert(VarsWithStackSlot->count(getAggregate(DAI)) &&
           "expected DAI's variable to have stack slot");

    Assignment AV = Assignment::makeFromMemDef(getIDFromInst(I));
    addMemDef(LiveSet, Var, AV);

    LLVM_DEBUG(dbgs() << " linked to " << *DAI << "\n");
    LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
                      << " -> ");

    // The last assignment to the stack is now AV. Check if the last debug
    // assignment has a matching Assignment.
    if (hasVarWithAssignment(LiveSet, BlockInfo::Debug, Var, AV)) {
      // The StackHomeValue and DebugValue for this variable match so we can
      // emit a stack home location here.
      LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n";);
      LLVM_DEBUG(dbgs() << " Stack val: "; AV.dump(dbgs()); dbgs() << "\n");
      LLVM_DEBUG(dbgs() << " Debug val: ";
                 LiveSet->DebugValue[static_cast<unsigned>(Var)].dump(dbgs());
                 dbgs() << "\n");
      setLocKind(LiveSet, Var, LocKind::Mem);
      emitDbgValue(LocKind::Mem, DAI, &I);
      continue;
    }

    // The StackHomeValue and DebugValue for this variable do not match. I.e.
    // The value currently stored in the stack is not what we'd expect to
    // see, so we cannot emit a stack home location here. Now we will
    // look at the live LocKind for the variable and determine an appropriate
    // dbg.value to emit.
    LocKind PrevLoc = getLocKind(LiveSet, Var);
    switch (PrevLoc) {
    case LocKind::Val: {
      // The value in memory has changed but we're not currently
      // using the memory location. Do nothing.
      LLVM_DEBUG(dbgs() << "Val, (unchanged)\n";);
      setLocKind(LiveSet, Var, LocKind::Val);
    } break;
    case LocKind::Mem: {
      // There's been an assignment to memory that we were using as a
      // location for this variable, and the Assignment doesn't match what
      // we'd expect to see in memory.
      Assignment DbgAV = LiveSet->getAssignment(BlockInfo::Debug, Var);
      if (DbgAV.Status == Assignment::NoneOrPhi) {
        // We need to terminate any previously open location now.
        LLVM_DEBUG(dbgs() << "None, No Debug value available\n";);
        setLocKind(LiveSet, Var, LocKind::None);
        emitDbgValue(LocKind::None, DAI, &I);
      } else {
        // The previous DebugValue Value can be used here.
        LLVM_DEBUG(dbgs() << "Val, Debug value is Known\n";);
        setLocKind(LiveSet, Var, LocKind::Val);
        if (DbgAV.Source) {
          emitDbgValue(LocKind::Val, DbgAV.Source, &I);
        } else {
          // PrevAV.Source is nullptr so we must emit undef here.
          emitDbgValue(LocKind::None, DAI, &I);
        }
      }
    } break;
    case LocKind::None: {
      // There's been an assignment to memory and we currently are
      // not tracking a location for the variable. Do not emit anything.
      LLVM_DEBUG(dbgs() << "None, (unchanged)\n";);
      setLocKind(LiveSet, Var, LocKind::None);
    } break;
    }
  }
}
1637
1638void AssignmentTrackingLowering::processDbgAssign(DbgAssignIntrinsic &DAI,
1639 BlockInfo *LiveSet) {
1640 // Only bother tracking variables that are at some point stack homed. Other
1641 // variables can be dealt with trivially later.
1642 if (!VarsWithStackSlot->count(getAggregate(&DAI)))
1643 return;
1644
1645 VariableID Var = getVariableID(DebugVariable(&DAI));
1646 Assignment AV = Assignment::make(getIDFromMarker(DAI), &DAI);
1647 addDbgDef(LiveSet, Var, AV);
1648
1649 LLVM_DEBUG(dbgs() << "processDbgAssign on " << DAI << "\n";);
1650 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1651 << " -> ");
1652
1653 // Check if the DebugValue and StackHomeValue both hold the same
1654 // Assignment.
1655 if (hasVarWithAssignment(LiveSet, BlockInfo::Stack, Var, AV)) {
1656 // They match. We can use the stack home because the debug intrinsics state
1657 // that an assignment happened here, and we know that specific assignment
1658 // was the last one to take place in memory for this variable.
1659 LocKind Kind;
1660 if (DAI.isKillAddress()) {
1661 LLVM_DEBUG(
1662 dbgs()
1663 << "Val, Stack matches Debug program but address is killed\n";);
1664 Kind = LocKind::Val;
1665 } else {
1666 LLVM_DEBUG(dbgs() << "Mem, Stack matches Debug program\n";);
1667 Kind = LocKind::Mem;
1668 };
1669 setLocKind(LiveSet, Var, Kind);
1670 emitDbgValue(Kind, &DAI, &DAI);
1671 } else {
1672 // The last assignment to the memory location isn't the one that we want to
1673 // show to the user so emit a dbg.value(Value). Value may be undef.
1674 LLVM_DEBUG(dbgs() << "Val, Stack contents is unknown\n";);
1675 setLocKind(LiveSet, Var, LocKind::Val);
1676 emitDbgValue(LocKind::Val, &DAI, &DAI);
1677 }
1678}
1679
1680void AssignmentTrackingLowering::processDbgValue(DbgValueInst &DVI,
1681 BlockInfo *LiveSet) {
1682 // Only other tracking variables that are at some point stack homed.
1683 // Other variables can be dealt with trivally later.
1684 if (!VarsWithStackSlot->count(getAggregate(&DVI)))
1685 return;
1686
1687 VariableID Var = getVariableID(DebugVariable(&DVI));
1688 // We have no ID to create an Assignment with so we mark this assignment as
1689 // NoneOrPhi. Note that the dbg.value still exists, we just cannot determine
1690 // the assignment responsible for setting this value.
1691 // This is fine; dbg.values are essentially interchangable with unlinked
1692 // dbg.assigns, and some passes such as mem2reg and instcombine add them to
1693 // PHIs for promoted variables.
1694 Assignment AV = Assignment::makeNoneOrPhi();
1695 addDbgDef(LiveSet, Var, AV);
1696
1697 LLVM_DEBUG(dbgs() << "processDbgValue on " << DVI << "\n";);
1698 LLVM_DEBUG(dbgs() << " LiveLoc " << locStr(getLocKind(LiveSet, Var))
1699 << " -> Val, dbg.value override");
1700
1701 setLocKind(LiveSet, Var, LocKind::Val);
1702 emitDbgValue(LocKind::Val, &DVI, &DVI);
1703}
1704
  // A fragment of zero bits describes no part of the variable, so the
  // intrinsic carries no useful information and callers may skip it.
  if (auto F = DVI.getExpression()->getFragmentInfo())
    return F->SizeInBits == 0;
  // No fragment info: the intrinsic describes the whole variable.
  return false;
}
1710
1711void AssignmentTrackingLowering::processDbgInstruction(
1712 DbgInfoIntrinsic &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
1713 auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I);
1714 if (!DVI)
1715 return;
1716
1717 // Ignore assignments to zero bits of the variable.
1718 if (hasZeroSizedFragment(*DVI))
1719 return;
1720
1721 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&I))
1722 processDbgAssign(*DAI, LiveSet);
1723 else if (auto *DVI = dyn_cast<DbgValueInst>(&I))
1724 processDbgValue(*DVI, LiveSet);
1725}
1726
1727void AssignmentTrackingLowering::resetInsertionPoint(Instruction &After) {
1728 assert(!After.isTerminator() && "Can't insert after a terminator");
1729 auto R = InsertBeforeMap.find(After.getNextNode());
1730 if (R == InsertBeforeMap.end())
1731 return;
1732 R->second.clear();
1733}
1734
void AssignmentTrackingLowering::process(BasicBlock &BB, BlockInfo *LiveSet) {
  for (auto II = BB.begin(), EI = BB.end(); II != EI;) {
    assert(VarsTouchedThisFrame.empty());
    // Process the instructions in "frames". A "frame" includes a single
    // non-debug instruction followed by any debug instructions before the
    // next non-debug instruction.
    if (!isa<DbgInfoIntrinsic>(&*II)) {
      if (II->isTerminator())
        break;
      resetInsertionPoint(*II);
      processNonDbgInstruction(*II, LiveSet);
      assert(LiveSet->isValid());
      ++II;
    }
    // Consume the run of debug intrinsics that completes this frame.
    while (II != EI) {
      auto *Dbg = dyn_cast<DbgInfoIntrinsic>(&*II);
      if (!Dbg)
        break;
      resetInsertionPoint(*II);
      processDbgInstruction(*Dbg, LiveSet);
      assert(LiveSet->isValid());
      ++II;
    }

    // We've processed everything in the "frame". Now determine which variables
    // cannot be represented by a dbg.declare.
    for (auto Var : VarsTouchedThisFrame) {
      LocKind Loc = getLocKind(LiveSet, Var);
      // If a variable's LocKind is anything other than LocKind::Mem then we
      // must note that it cannot be represented with a dbg.declare.
      // Note that this check is enough without having to check the result of
      // joins() because for join to produce anything other than Mem after
      // we've already seen a Mem we'd be joining None or Val with Mem. In that
      // case, we've already hit this codepath when we set the LocKind to Val
      // or None in that block.
      if (Loc != LocKind::Mem) {
        DebugVariable DbgVar = FnVarLocs->getVariable(Var);
        DebugAggregate Aggr{DbgVar.getVariable(), DbgVar.getInlinedAt()};
        NotAlwaysStackHomed.insert(Aggr);
      }
    }
    VarsTouchedThisFrame.clear();
  }
}
1779
1780AssignmentTrackingLowering::LocKind
1781AssignmentTrackingLowering::joinKind(LocKind A, LocKind B) {
1782 // Partial order:
1783 // None > Mem, Val
1784 return A == B ? A : LocKind::None;
1785}
1786
1787AssignmentTrackingLowering::Assignment
1788AssignmentTrackingLowering::joinAssignment(const Assignment &A,
1789 const Assignment &B) {
1790 // Partial order:
1791 // NoneOrPhi(null, null) > Known(v, ?s)
1792
1793 // If either are NoneOrPhi the join is NoneOrPhi.
1794 // If either value is different then the result is
1795 // NoneOrPhi (joining two values is a Phi).
1796 if (!A.isSameSourceAssignment(B))
1797 return Assignment::makeNoneOrPhi();
1798 if (A.Status == Assignment::NoneOrPhi)
1799 return Assignment::makeNoneOrPhi();
1800
1801 // Source is used to lookup the value + expression in the debug program if
1802 // the stack slot gets assigned a value earlier than expected. Because
1803 // we're only tracking the one dbg.assign, we can't capture debug PHIs.
1804 // It's unlikely that we're losing out on much coverage by avoiding that
1805 // extra work.
1806 // The Source may differ in this situation:
1807 // Pred.1:
1808 // dbg.assign i32 0, ..., !1, ...
1809 // Pred.2:
1810 // dbg.assign i32 1, ..., !1, ...
1811 // Here the same assignment (!1) was performed in both preds in the source,
1812 // but we can't use either one unless they are identical (e.g. .we don't
1813 // want to arbitrarily pick between constant values).
1814 auto JoinSource = [&]() -> DbgAssignIntrinsic * {
1815 if (A.Source == B.Source)
1816 return A.Source;
1817 if (A.Source == nullptr || B.Source == nullptr)
1818 return nullptr;
1819 if (A.Source->isIdenticalTo(B.Source))
1820 return A.Source;
1821 return nullptr;
1822 };
1823 DbgAssignIntrinsic *Source = JoinSource();
1824 assert(A.Status == B.Status && A.Status == Assignment::Known);
1825 assert(A.ID == B.ID);
1826 return Assignment::make(A.ID, Source);
1827}
1828
AssignmentTrackingLowering::BlockInfo
AssignmentTrackingLowering::joinBlockInfo(const BlockInfo &A,
                                          const BlockInfo &B) {
  // Delegate to BlockInfo's own join; it needs the number of tracked
  // variables to size its per-variable vectors.
  return BlockInfo::join(A, B, TrackedVariablesVectorSize);
}
1834
/// Compute LiveIn for \p BB as the join of the LiveOut sets of its
/// already-visited predecessors. Returns true if BB's LiveIn was created or
/// changed, i.e. the block needs (re)processing by the caller.
bool AssignmentTrackingLowering::join(
    const BasicBlock &BB, const SmallPtrSet<BasicBlock *, 16> &Visited) {

  // Ignore backedges if we have not visited the predecessor yet. As the
  // predecessor hasn't yet had locations propagated into it, most locations
  // will not yet be valid, so treat them as all being uninitialized and
  // potentially valid. If a location guessed to be correct here is
  // invalidated later, we will remove it when we revisit this block. This
  // is essentially the same as initialising all LocKinds and Assignments to
  // an implicit ⊥ value which is the identity value for the join operation.
  for (auto I = pred_begin(&BB), E = pred_end(&BB); I != E; I++) {
    const BasicBlock *Pred = *I;
    if (Visited.count(Pred))
      VisitedPreds.push_back(Pred);
  }

  // No preds visited yet.
  if (VisitedPreds.empty()) {
    auto It = LiveIn.try_emplace(&BB, BlockInfo());
    bool DidInsert = It.second;
    if (DidInsert)
      It.first->second.init(TrackedVariablesVectorSize);
    return /*Changed*/ DidInsert;
  }

  // Exactly one visited pred. Copy the LiveOut from that pred into BB LiveIn.
  if (VisitedPreds.size() == 1) {
    const BlockInfo &PredLiveOut = LiveOut.find(VisitedPreds[0])->second;
    auto CurrentLiveInEntry = LiveIn.find(&BB);

    // Check if there isn't an entry, or there is but the LiveIn set has
    // changed (expensive check).
    if (CurrentLiveInEntry == LiveIn.end())
      LiveIn.insert(std::make_pair(&BB, PredLiveOut));
    else if (PredLiveOut != CurrentLiveInEntry->second)
      CurrentLiveInEntry->second = PredLiveOut;
    else
      return /*Changed*/ false;
    return /*Changed*/ true;
  }

  // More than one pred. Join LiveOuts of blocks 1 and 2.
  assert(VisitedPreds.size() > 1);
  const BlockInfo &PredLiveOut0 = LiveOut.find(VisitedPreds[0])->second;
  const BlockInfo &PredLiveOut1 = LiveOut.find(VisitedPreds[1])->second;
  BlockInfo BBLiveIn = joinBlockInfo(PredLiveOut0, PredLiveOut1);

  // Join the LiveOuts of subsequent blocks.
  ArrayRef Tail = ArrayRef(VisitedPreds).drop_front(2);
  for (const BasicBlock *Pred : Tail) {
    const auto &PredLiveOut = LiveOut.find(Pred);
    assert(PredLiveOut != LiveOut.end() &&
           "block should have been processed already");
    BBLiveIn = joinBlockInfo(std::move(BBLiveIn), PredLiveOut->second);
  }

  // Save the joined result for BB.
  auto CurrentLiveInEntry = LiveIn.find(&BB);
  // Check if there isn't an entry, or there is but the LiveIn set has changed
  // (expensive check).
  if (CurrentLiveInEntry == LiveIn.end())
    LiveIn.try_emplace(&BB, std::move(BBLiveIn));
  else if (BBLiveIn != CurrentLiveInEntry->second)
    CurrentLiveInEntry->second = std::move(BBLiveIn);
  else
    return /*Changed*/ false;
  return /*Changed*/ true;
}
1904
/// Return true if A fully contains B, i.e. the bit range covered by fragment
/// B ([offset, offset+size)) lies entirely within the range covered by
/// fragment A.
  auto ALeft = A.OffsetInBits;
  auto BLeft = B.OffsetInBits;
  // B must not start before A does.
  if (BLeft < ALeft)
    return false;

  auto ARight = ALeft + A.SizeInBits;
  auto BRight = BLeft + B.SizeInBits;
  // B must not end after A does.
  if (BRight > ARight)
    return false;
  return true;
}
1919
/// Describe the memory written by \p I as an AssignmentInfo if \p I is an
/// untagged store-like instruction, otherwise return std::nullopt.
static std::optional<at::AssignmentInfo>
  // Don't bother checking if this is an AllocaInst. We know this
  // instruction has no tag which means there are no variables associated
  // with it.
  if (const auto *SI = dyn_cast<StoreInst>(&I))
    return at::getAssignmentInfo(Layout, SI);
  // Memory intrinsics also write memory; derive their assignment info too.
  if (const auto *MI = dyn_cast<MemIntrinsic>(&I))
    return at::getAssignmentInfo(Layout, MI);
  // Alloca or non-store-like inst.
  return std::nullopt;
}
1932
/// Build a map of {Variable x: Variables y} where all variable fragments
/// contained within the variable fragment x are in set y. This means that
/// y does not contain all overlaps because partial overlaps are excluded.
///
/// While we're iterating over the function, add single location defs for
/// dbg.declares to \p FnVarLocs.
///
/// Variables that are interesting to this pass are added to
/// FnVarLocs->Variables first. TrackedVariablesVectorSize is set to the ID of
/// the last interesting variable plus 1, meaning variables with ID 1
/// (inclusive) to TrackedVariablesVectorSize (exclusive) are interesting. The
/// subsequent variables are either stack homed or fully promoted.
///
/// Finally, populate UntaggedStoreVars with a mapping of untagged stores to
/// the stored-to variable fragments.
///
/// These tasks are bundled together to reduce the number of times we need
/// to iterate over the function as they can be achieved together in one pass.
    Function &Fn, FunctionVarLocsBuilder *FnVarLocs,
    const DenseSet<DebugAggregate> &VarsWithStackSlot,
    unsigned &TrackedVariablesVectorSize) {
  // Map of Variable: [Fragments].
  // Iterate over all instructions:
  // - dbg.declare -> add single location variable record
  // - dbg.* -> Add fragments to FragmentMap
  // - untagged store -> Add fragments to FragmentMap and update
  //   UntaggedStoreVars.
  // We need to add fragments for untagged stores too so that we can correctly
  // clobber overlapped fragment locations later.
  for (auto &BB : Fn) {
    for (auto &I : BB) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
        Declares.push_back(DDI);
      } else if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
        DebugVariable DV = DebugVariable(DII);
        DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
        if (!VarsWithStackSlot.contains(DA))
          continue;
        if (Seen.insert(DV).second)
          FragmentMap[DA].push_back(DV);
      } else if (auto Info = getUntaggedStoreAssignmentInfo(
                     I, Fn.getParent()->getDataLayout())) {
        // Find markers linked to this alloca.
        std::optional<DIExpression::FragmentInfo> FragInfo;

        // Skip this assignment if the affected bits are outside of the
        // variable fragment.
                I.getModule()->getDataLayout(), Info->Base,
                Info->OffsetInBits, Info->SizeInBits, DAI, FragInfo) ||
            (FragInfo && FragInfo->SizeInBits == 0))
          continue;

        // FragInfo from calculateFragmentIntersect is nullopt if the
        // resultant fragment matches DAI's fragment or entire variable - in
        // which case copy the fragment info from DAI. If FragInfo is still
        // nullopt after the copy it means "no fragment info" instead, which
        // is how it is usually interpreted.
        if (!FragInfo)
          FragInfo = DAI->getExpression()->getFragmentInfo();

        DebugVariable DV = DebugVariable(DAI->getVariable(), FragInfo,
                                         DAI->getDebugLoc().getInlinedAt());
        DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
        if (!VarsWithStackSlot.contains(DA))
          continue;

        // Cache this info for later.
        UntaggedStoreVars[&I].push_back(
            {FnVarLocs->insertVariable(DV), *Info});

        if (Seen.insert(DV).second)
          FragmentMap[DA].push_back(DV);
      }
    }
  }

  // Sort the fragment map for each DebugAggregate in ascending
  // order of fragment size - there should be no duplicates.
  for (auto &Pair : FragmentMap) {
    SmallVector<DebugVariable, 8> &Frags = Pair.second;
    std::sort(Frags.begin(), Frags.end(),
              [](const DebugVariable &Next, const DebugVariable &Elmt) {
                return Elmt.getFragmentOrDefault().SizeInBits >
                       Next.getFragmentOrDefault().SizeInBits;
              });
    // Check for duplicates.
    assert(std::adjacent_find(Frags.begin(), Frags.end()) == Frags.end());
  }

  // Build the map.
  for (auto &Pair : FragmentMap) {
    auto &Frags = Pair.second;
    for (auto It = Frags.begin(), IEnd = Frags.end(); It != IEnd; ++It) {
      DIExpression::FragmentInfo Frag = It->getFragmentOrDefault();
      // Find the frags that this is contained within.
      //
      // Because Frags is sorted by size and none have the same offset and
      // size, we know that this frag can only be contained by subsequent
      // elements.
      ++OtherIt;
      VariableID ThisVar = FnVarLocs->insertVariable(*It);
      for (; OtherIt != IEnd; ++OtherIt) {
        DIExpression::FragmentInfo OtherFrag = OtherIt->getFragmentOrDefault();
        VariableID OtherVar = FnVarLocs->insertVariable(*OtherIt);
        if (fullyContains(OtherFrag, Frag))
          Map[OtherVar].push_back(ThisVar);
      }
    }
  }

  // VariableIDs are 1-based so the variable-tracking bitvector needs
  // NumVariables plus 1 bits.
  TrackedVariablesVectorSize = FnVarLocs->getNumVariables() + 1;

  // Finally, insert the declares afterwards, so the first IDs are all
  // partially stack homed vars.
  for (auto *DDI : Declares)
    FnVarLocs->addSingleLocVar(DebugVariable(DDI), DDI->getExpression(),
                               DDI->getDebugLoc(), DDI->getWrappedLocation());
  return Map;
}
2064
bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
  // Bail out (dropping variable locations) on pathologically large functions
  // to bound compile time; threshold is the -debug-ata-max-blocks option.
  if (Fn.size() > MaxNumBlocks) {
    LLVM_DEBUG(dbgs() << "[AT] Dropping var locs in: " << Fn.getName()
                      << ": too many blocks (" << Fn.size() << ")\n");
    at::deleteAll(&Fn);
    return false;
  }

  FnVarLocs = FnVarLocsBuilder;

  // The general structure here is inspired by VarLocBasedImpl.cpp
  // (LiveDebugValues).

  // Build the variable fragment overlap map.
  // Note that this pass doesn't handle partial overlaps correctly (FWIW
  // neither does LiveDebugVariables) because that is difficult to do and
  // appears to be a rare occurrence.
      Fn, FnVarLocs, *VarsWithStackSlot, UntaggedStoreVars,
      TrackedVariablesVectorSize);

  // Prepare for traversal.
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Worklist;
  std::priority_queue<unsigned int, std::vector<unsigned int>,
                      std::greater<unsigned int>>
      Pending;
  { // Init OrderToBB and BBToOrder.
    unsigned int RPONumber = 0;
    for (auto RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) {
      OrderToBB[RPONumber] = *RI;
      BBToOrder[*RI] = RPONumber;
      Worklist.push(RPONumber);
      ++RPONumber;
    }
    LiveIn.init(RPONumber);
    LiveOut.init(RPONumber);
  }

  // Perform the traversal.
  //
  // This is a standard "union of predecessor outs" dataflow problem. To solve
  // it, we perform join() and process() using the two worklist method until
  // the LiveIn data for each block becomes unchanging. The "proof" that this
  // terminates can be put together by looking at the comments around LocKind,
  // Assignment, and the various join methods, which show that all the elements
  // involved are made up of join-semilattices; LiveIn(n) can only
  // monotonically increase in value throughout the dataflow.
  //
  while (!Worklist.empty()) {
    // We track what is on the pending worklist to avoid inserting the same
    // thing twice.
    LLVM_DEBUG(dbgs() << "Processing Worklist\n");
    while (!Worklist.empty()) {
      BasicBlock *BB = OrderToBB[Worklist.top()];
      LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
      Worklist.pop();
      bool InChanged = join(*BB, Visited);
      // Always consider LiveIn changed on the first visit.
      InChanged |= Visited.insert(BB).second;
      if (InChanged) {
        LLVM_DEBUG(dbgs() << BB->getName() << " has new InLocs, process it\n");
        // Mutate a copy of LiveIn while processing BB. After calling process
        // LiveSet is the LiveOut set for BB.
        BlockInfo LiveSet = LiveIn[BB];

        // Process the instructions in the block.
        process(*BB, &LiveSet);

        // Relatively expensive check: has anything changed in LiveOut for BB?
        if (LiveOut[BB] != LiveSet) {
          LLVM_DEBUG(dbgs() << BB->getName()
                            << " has new OutLocs, add succs to worklist: [ ");
          LiveOut[BB] = std::move(LiveSet);
          for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
            if (OnPending.insert(*I).second) {
              LLVM_DEBUG(dbgs() << I->getName() << " ");
              Pending.push(BBToOrder[*I]);
            }
          }
          LLVM_DEBUG(dbgs() << "]\n");
        }
      }
    }
    Worklist.swap(Pending);
    // At this point, pending must be empty, since it was just the empty
    // worklist
    assert(Pending.empty() && "Pending should be empty");
  }

  // That's the hard part over. Now we just have some admin to do.

  // Record whether we inserted any intrinsics.
  bool InsertedAnyIntrinsics = false;

  // Identify and add defs for single location variables.
  //
  // Go through all of the defs that we plan to add. If the aggregate variable
  // it's a part of is not in the NotAlwaysStackHomed set we can emit a single
  // location def and omit the rest. Add an entry to AlwaysStackHomed so that
  // we can identify those unneeded defs later.
  DenseSet<DebugAggregate> AlwaysStackHomed;
  for (const auto &Pair : InsertBeforeMap) {
    const auto &Vec = Pair.second;
    for (VarLocInfo VarLoc : Vec) {
      DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
      DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()};

      // Skip this Var if it's not always stack homed.
      if (NotAlwaysStackHomed.contains(Aggr))
        continue;

      // Skip complex cases such as when different fragments of a variable have
      // been split into different allocas. Skipping in this case means falling
      // back to using a list of defs (which could reduce coverage, but is no
      // less correct).
      bool Simple =
          VarLoc.Expr->getNumElements() == 1 && VarLoc.Expr->startsWithDeref();
      if (!Simple) {
        NotAlwaysStackHomed.insert(Aggr);
        continue;
      }

      // All source assignments to this variable remain and all stores to any
      // part of the variable store to the same address (with varying
      // offsets). We can just emit a single location for the whole variable.
      //
      // Unless we've already done so, create the single location def now.
      if (AlwaysStackHomed.insert(Aggr).second) {
        assert(!VarLoc.Values.hasArgList());
        // TODO: When more complex cases are handled VarLoc.Expr should be
        // built appropriately rather than always using an empty DIExpression.
        // The assert below is a reminder.
        assert(Simple);
        VarLoc.Expr = DIExpression::get(Fn.getContext(), std::nullopt);
        DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
        FnVarLocs->addSingleLocVar(Var, VarLoc.Expr, VarLoc.DL, VarLoc.Values);
        InsertedAnyIntrinsics = true;
      }
    }
  }

  // Insert the other DEFs.
  for (const auto &[InsertBefore, Vec] : InsertBeforeMap) {
    for (const VarLocInfo &VarLoc : Vec) {
      DebugVariable Var = FnVarLocs->getVariable(VarLoc.VariableID);
      DebugAggregate Aggr{Var.getVariable(), Var.getInlinedAt()};
      // If this variable is always stack homed then we have already inserted a
      // dbg.declare and deleted this dbg.value.
      if (AlwaysStackHomed.contains(Aggr))
        continue;
      NewDefs.push_back(VarLoc);
      InsertedAnyIntrinsics = true;
    }

    FnVarLocs->setWedge(InsertBefore, std::move(NewDefs));
  }

  // Fully promoted variables need no dataflow; translate their intrinsics
  // directly.
  InsertedAnyIntrinsics |= emitPromotedVarLocs(FnVarLocs);

  return InsertedAnyIntrinsics;
}
2234
2235bool AssignmentTrackingLowering::emitPromotedVarLocs(
2236 FunctionVarLocsBuilder *FnVarLocs) {
2237 bool InsertedAnyIntrinsics = false;
2238 // Go through every block, translating debug intrinsics for fully promoted
2239 // variables into FnVarLocs location defs. No analysis required for these.
2240 for (auto &BB : Fn) {
2241 for (auto &I : BB) {
2242 // Skip instructions other than dbg.values and dbg.assigns.
2243 auto *DVI = dyn_cast<DbgValueInst>(&I);
2244 if (!DVI)
2245 continue;
2246 // Skip variables that haven't been promoted - we've dealt with those
2247 // already.
2248 if (VarsWithStackSlot->contains(getAggregate(DVI)))
2249 continue;
2250 Instruction *InsertBefore = I.getNextNode();
2251 assert(InsertBefore && "Unexpected: debug intrinsics after a terminator");
2252 FnVarLocs->addVarLoc(InsertBefore, DebugVariable(DVI),
2253 DVI->getExpression(), DVI->getDebugLoc(),
2254 DVI->getWrappedLocation());
2255 InsertedAnyIntrinsics = true;
2256 }
2257 }
2258 return InsertedAnyIntrinsics;
2259}
2260
/// Remove redundant definitions within sequences of consecutive location defs.
/// This is done using a backward scan to keep the last def describing a
/// specific variable/fragment.
///
/// This implements removeRedundantDbgInstrsUsingBackwardScan from
/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
/// FunctionVarLocsBuilder instead of with intrinsics.
static bool
                                        FunctionVarLocsBuilder &FnVarLocs) {
  bool Changed = false;
  SmallDenseMap<DebugAggregate, BitVector> VariableDefinedBits;
  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : reverse(*BB)) {
    if (!isa<DbgVariableIntrinsic>(I)) {
      // Sequence of consecutive defs ended. Clear map for the next one.
      VariableDefinedBits.clear();
    }

    // Get the location defs that start just before this instruction.
    const auto *Locs = FnVarLocs.getWedge(&I);
    if (!Locs)
      continue;

    NumWedgesScanned++;
    bool ChangedThisWedge = false;
    // The new pruned set of defs, reversed because we're scanning backwards.
    SmallVector<VarLocInfo> NewDefsReversed;

    // Iterate over the existing defs in reverse.
    for (auto RIt = Locs->rbegin(), REnd = Locs->rend(); RIt != REnd; ++RIt) {
      NumDefsScanned++;
      DebugAggregate Aggr =
          getAggregate(FnVarLocs.getVariable(RIt->VariableID));
      uint64_t SizeInBits = Aggr.first->getSizeInBits().value_or(0);

      if (SizeInBits == 0) {
        // If the size is unknown (0) then keep this location def to be safe.
        NewDefsReversed.push_back(*RIt);
        continue;
      }

      // Only keep this location definition if it is not fully eclipsed by
      // other definitions in this wedge that come after it.

      // Insert the bits the location definition defines.
      auto InsertResult =
          VariableDefinedBits.try_emplace(Aggr, BitVector(SizeInBits));
      bool FirstDefinition = InsertResult.second;
      BitVector &DefinedBits = InsertResult.first->second;

          RIt->Expr->getFragmentInfo().value_or(
              DIExpression::FragmentInfo(SizeInBits, 0));
      bool InvalidFragment = Fragment.endInBits() > SizeInBits;

      // If this defines any previously undefined bits, keep it.
      if (FirstDefinition || InvalidFragment ||
          DefinedBits.find_first_unset_in(Fragment.startInBits(),
                                          Fragment.endInBits()) != -1) {
        if (!InvalidFragment)
          DefinedBits.set(Fragment.startInBits(), Fragment.endInBits());
        NewDefsReversed.push_back(*RIt);
        continue;
      }

      // Redundant def found: throw it away. Since the wedge of defs is being
      // rebuilt, doing nothing is the same as deleting an entry.
      ChangedThisWedge = true;
      NumDefsRemoved++;
    }

    // Un-reverse the defs and replace the wedge with the pruned version.
    if (ChangedThisWedge) {
      std::reverse(NewDefsReversed.begin(), NewDefsReversed.end());
      FnVarLocs.setWedge(&I, std::move(NewDefsReversed));
      NumWedgesChanged++;
      Changed = true;
    }
  }

  return Changed;
}
2346
/// Remove redundant location defs using a forward scan. This can remove a
/// location definition that is redundant due to indicating that a variable has
/// the same value as is already being indicated by an earlier def.
///
/// This implements removeRedundantDbgInstrsUsingForwardScan from
/// lib/Transforms/Utils/BasicBlockUtils.cpp for locations described with
/// FunctionVarLocsBuilder instead of with intrinsics.
static bool
                                       FunctionVarLocsBuilder &FnVarLocs) {
  bool Changed = false;
      VariableMap;

  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : *BB) {
    // Get the defs that come just before this instruction.
    const auto *Locs = FnVarLocs.getWedge(&I);
    if (!Locs)
      continue;

    NumWedgesScanned++;
    bool ChangedThisWedge = false;
    // The new pruned set of defs.

    // Iterate over the existing defs.
    for (const VarLocInfo &Loc : *Locs) {
      NumDefsScanned++;
      // Keys deliberately ignore the fragment so any def of the variable
      // refreshes the mapped value/expression.
      DebugVariable Key(FnVarLocs.getVariable(Loc.VariableID).getVariable(),
                        std::nullopt, Loc.DL.getInlinedAt());
      auto VMI = VariableMap.find(Key);

      // Update the map if we found a new value/expression describing the
      // variable, or if the variable wasn't mapped already.
      if (VMI == VariableMap.end() || VMI->second.first != Loc.Values ||
          VMI->second.second != Loc.Expr) {
        VariableMap[Key] = {Loc.Values, Loc.Expr};
        NewDefs.push_back(Loc);
        continue;
      }

      // Did not insert this Loc, which is the same as removing it.
      ChangedThisWedge = true;
      NumDefsRemoved++;
    }

    // Replace the existing wedge with the pruned version.
    if (ChangedThisWedge) {
      FnVarLocs.setWedge(&I, std::move(NewDefs));
      NumWedgesChanged++;
      Changed = true;
    }
  }

  return Changed;
}
2406
static bool
                                 FunctionVarLocsBuilder &FnVarLocs) {
  assert(BB->isEntryBlock());
  // Do extra work to ensure that we remove semantically unimportant undefs.
  //
  // This is to work around the fact that SelectionDAG will hoist dbg.values
  // using argument values to the top of the entry block. That can move arg
  // dbg.values before undef and constant dbg.values which they previously
  // followed. The easiest thing to do is to just try to feed SelectionDAG
  // input it's happy with.
  //
  // Map of {Variable x: Fragments y} where the fragments y of variable x
  // have at least one non-undef location defined already. Don't use directly,
  // instead call DefineBits and HasDefinedBits.
  VarsWithDef;
  // Specify that V (a fragment of A) has a non-undef location.
  auto DefineBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
    VarsWithDef[A].insert(V.getFragmentOrDefault());
  };
  // Return true if a non-undef location has been defined for V (a fragment of
  // A). Doesn't imply that the location is currently non-undef, just that a
  // non-undef location has been seen previously.
  auto HasDefinedBits = [&VarsWithDef](DebugAggregate A, DebugVariable V) {
    auto FragsIt = VarsWithDef.find(A);
    if (FragsIt == VarsWithDef.end())
      return false;
    return llvm::any_of(FragsIt->second, [V](auto Frag) {
      return DIExpression::fragmentsOverlap(Frag, V.getFragmentOrDefault());
    });
  };

  bool Changed = false;

  // Scan over the entire block, not just over the instructions mapped by
  // FnVarLocs, because wedges in FnVarLocs may only be separated by debug
  // instructions.
  for (const Instruction &I : *BB) {
    // Get the defs that come just before this instruction.
    const auto *Locs = FnVarLocs.getWedge(&I);
    if (!Locs)
      continue;

    NumWedgesScanned++;
    bool ChangedThisWedge = false;
    // The new pruned set of defs.

    // Iterate over the existing defs.
    for (const VarLocInfo &Loc : *Locs) {
      NumDefsScanned++;
      DebugAggregate Aggr{FnVarLocs.getVariable(Loc.VariableID).getVariable(),
                          Loc.DL.getInlinedAt()};
      DebugVariable Var = FnVarLocs.getVariable(Loc.VariableID);

      // Remove undef entries that are encountered before any non-undef
      // intrinsics from the entry block.
      if (Loc.Values.isKillLocation(Loc.Expr) && !HasDefinedBits(Aggr, Var)) {
        // Did not insert this Loc, which is the same as removing it.
        NumDefsRemoved++;
        ChangedThisWedge = true;
        continue;
      }

      DefineBits(Aggr, Var);
      NewDefs.push_back(Loc);
    }

    // Replace the existing wedge with the pruned version.
    if (ChangedThisWedge) {
      FnVarLocs.setWedge(&I, std::move(NewDefs));
      NumWedgesChanged++;
      Changed = true;
    }
  }

  return Changed;
}
2487
                       FunctionVarLocsBuilder &FnVarLocs) {
  // Run all redundancy-removal scans over \p BB; the entry block additionally
  // gets undef-pruning (see removeUndefDbgLocsFromEntryBlock). Returns true
  // if any wedge was changed.
  bool MadeChanges = false;
  MadeChanges |= removeRedundantDbgLocsUsingBackwardScan(BB, FnVarLocs);
  if (BB->isEntryBlock())
    MadeChanges |= removeUndefDbgLocsFromEntryBlock(BB, FnVarLocs);
  MadeChanges |= removeRedundantDbgLocsUsingForwardScan(BB, FnVarLocs);

  if (MadeChanges)
    LLVM_DEBUG(dbgs() << "Removed redundant dbg locs from: " << BB->getName()
                      << "\n");
  return MadeChanges;
}
2501
  for (auto &BB : Fn) {
    for (auto &I : BB) {
      // Any variable linked to an instruction is considered
      // interesting. Ideally we only need to check Allocas, however, a
      // DIAssignID might get dropped from an alloca but not stores. In that
      // case, we need to consider the variable interesting for NFC behaviour
      // with this change. TODO: Consider only looking at allocas.
        Result.insert({DAI->getVariable(), DAI->getDebugLoc().getInlinedAt()});
      }
    }
  }
  // Result now holds every (DIVariable, InlinedAt) aggregate seen attached to
  // an instruction via its assignment markers.
  return Result;
}
2518
/// Run the assignment-tracking lowering over \p Fn, recording the computed
/// variable location definitions in \p FnVarLocs.
static void analyzeFunction(Function &Fn, const DataLayout &Layout,
                            FunctionVarLocsBuilder *FnVarLocs) {
  // The analysis will generate location definitions for all variables, but we
  // only need to perform a dataflow on the set of variables which have a stack
  // slot. Find those now.
  DenseSet<DebugAggregate> VarsWithStackSlot = findVarsWithStackSlot(Fn);

  bool Changed = false;

  // Use a scope block to clean up AssignmentTrackingLowering before running
  // MemLocFragmentFill to reduce peak memory consumption.
  {
    AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot);
    Changed = Pass.run(FnVarLocs);
  }

  if (Changed) {
    MemLocFragmentFill Pass(Fn, &VarsWithStackSlot,
    Pass.run(FnVarLocs);

    // Remove redundant entries. As well as reducing memory consumption and
    // avoiding waiting cycles later by burning some now, this has another
    // important job. That is to work around some SelectionDAG quirks. See
    // removeRedundantDbgLocsUsingForwardScan comments for more info on that.
    for (auto &BB : Fn)
      removeRedundantDbgLocs(&BB, *FnVarLocs);
  }
}
2548
  // Nothing to do unless the module uses assignment-tracking metadata.
  if (!isAssignmentTrackingEnabled(*F.getParent()))
    return false;

  LLVM_DEBUG(dbgs() << "AssignmentTrackingAnalysis run on " << F.getName()
                    << "\n");
  auto DL = std::make_unique<DataLayout>(F.getParent());

  // Clear previous results.
  Results->clear();

  FunctionVarLocsBuilder Builder;
  analyzeFunction(F, *DL.get(), &Builder);

  // Save these results.
  Results->init(Builder);

  // Optionally dump the computed locations (respects -filter-print-funcs).
  if (PrintResults && isFunctionInPrintList(F.getName()))
    Results->print(errs(), F);

  // Return false because this pass does not modify the function.
  return false;
}
2572
2574 : FunctionPass(ID), Results(std::make_unique<FunctionVarLocs>()) {}
2575
2577
2579 "Assignment Tracking Analysis", false, true)
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
std::pair< const DILocalVariable *, const DILocation * > DebugAggregate
A whole (unfragmented) source variable.
static void analyzeFunction(Function &Fn, const DataLayout &Layout, FunctionVarLocsBuilder *FnVarLocs)
static std::pair< Value *, DIExpression * > walkToAllocaAndPrependOffsetDeref(const DataLayout &DL, Value *Start, DIExpression *Expression)
Walk backwards along constant GEPs and bitcasts to the base storage from Start as far as possible.
static DIAssignID * getIDFromMarker(const DbgAssignIntrinsic &DAI)
static DenseSet< DebugAggregate > findVarsWithStackSlot(Function &Fn)
static bool hasZeroSizedFragment(DbgVariableIntrinsic &DVI)
static bool fullyContains(DIExpression::FragmentInfo A, DIExpression::FragmentInfo B)
Return true if A fully contains B.
static DebugAggregate getAggregate(const DbgVariableIntrinsic *DII)
static std::optional< at::AssignmentInfo > getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout)
static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(Function &Fn, FunctionVarLocsBuilder *FnVarLocs, const DenseSet< DebugAggregate > &VarsWithStackSlot, AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars, unsigned &TrackedVariablesVectorSize)
Build a map of {Variable x: Variables y} where all variable fragments contained within the variable f...
static bool removeUndefDbgLocsFromEntryBlock(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
static cl::opt< bool > PrintResults("print-debug-ata", cl::init(false), cl::Hidden)
Print the results of the analysis. Respects -filter-print-funcs.
const char * locStr(AssignmentTrackingLowering::LocKind Loc)
static bool removeRedundantDbgLocsUsingForwardScan(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
Remove redundant location defs using a forward scan.
static bool removeRedundantDbgLocs(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
static cl::opt< bool > EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true), cl::Hidden)
Option for debugging the pass, determines if the memory location fragment filling happens after gener...
static DIAssignID * getIDFromInst(const Instruction &I)
static std::optional< int64_t > getDerefOffsetInBytes(const DIExpression *DIExpr)
Extract the offset used in DIExpr.
static bool removeRedundantDbgLocsUsingBackwardScan(const BasicBlock *BB, FunctionVarLocsBuilder &FnVarLocs)
Remove redundant definitions within sequences of consecutive location defs.
static cl::opt< cl::boolOrDefault > CoalesceAdjacentFragmentsOpt("debug-ata-coalesce-frags", cl::Hidden)
Coalesce adjacent dbg locs describing memory locations that have contiguous fragments.
static cl::opt< unsigned > MaxNumBlocks("debug-ata-max-blocks", cl::init(10000), cl::desc("Maximum num basic blocks before debug info dropped"), cl::Hidden)
static bool shouldCoalesceFragments(Function &F)
This file implements the BitVector class.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
dxil metadata DXIL Metadata Emit
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines DenseMapInfo traits for DenseMap.
This file contains constants used for implementing Dwarf debug support.
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1275
bool End
Definition: ELF_riscv.cpp:478
#define DEBUG_TYPE
IRTranslator LLVM IR MI
This file implements a coalescing interval map for small objects.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define P(N)
bool Debug
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Scalar Replacement Of Aggregates
Definition: SROA.cpp:5329
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
@ None
Value * RHS
Value * LHS
Helper class to build FunctionVarLocs, since that class isn't easy to modify.
void addVarLoc(Instruction *Before, DebugVariable Var, DIExpression *Expr, DebugLoc DL, RawLocationWrapper R)
Add a def to the wedge of defs just before /p Before.
void setWedge(const Instruction *Before, SmallVector< VarLocInfo > &&Wedge)
Replace the defs that come just before /p Before with /p Wedge.
const SmallVectorImpl< VarLocInfo > * getWedge(const Instruction *Before) const
Return ptr to wedge of defs or nullptr if no defs come just before /p Before.
void addSingleLocVar(DebugVariable Var, DIExpression *Expr, DebugLoc DL, RawLocationWrapper R)
Add a def for a variable that is valid for its lifetime.
VariableID insertVariable(DebugVariable V)
Find or insert V and return the ID.
const DebugVariable & getVariable(VariableID ID) const
Get a variable from its ID.
Class for arbitrary precision integers.
Definition: APInt.h:76
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1485
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:449
an instruction to allocate memory on the stack
Definition: Instructions.h:58
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:450
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:437
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:601
int find_first_unset_in(unsigned Begin, unsigned End) const
find_first_unset_in - Returns the index of the first unset bit in the range [Begin,...
Definition: BitVector.h:261
BitVector & set()
Definition: BitVector.h:351
iterator_range< const_set_bits_iterator > set_bits() const
Definition: BitVector.h:140
Assignment ID.
A structured debug information entry.
Definition: DIE.h:819
DWARF expression.
static DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
unsigned getNumElements() const
bool startsWithDeref() const
Return whether the first element a DW_OP_deref.
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
ArrayRef< uint64_t > getElements() const
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Debug location.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
StringRef getName() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
This represents the llvm.dbg.assign instruction.
DIAssignID * getAssignID() const
bool isKillAddress() const
Check whether this kills the address component.
This is the common base class for debug info intrinsics.
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
DILocalVariable * getVariable() const
DIExpression * getExpression() const
RawLocationWrapper getWrappedLocation() const
A debug info location.
Definition: DebugLoc.h:33
DILocation * getInlinedAt() const
Definition: DebugLoc.cpp:39
Identifies a unique instance of a variable.
const DILocation * getInlinedAt() const
const DILocalVariable * getVariable() const
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:235
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
void init(unsigned InitNumEntries)
Definition: DenseMap.h:819
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Class representing an expression and its matching format.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
Data structure describing the variable locations in a function.
void print(raw_ostream &OS, const Function &Fn) const
const VarLocInfo * locs_begin(const Instruction *Before) const
First variable location definition that comes before Before.
const VarLocInfo * single_locs_begin() const
const VarLocInfo * locs_end(const Instruction *Before) const
One past the last variable location definition that comes before Before.
const VarLocInfo * single_locs_end() const
One past the last single-location variable location definition.
void init(FunctionVarLocsBuilder &Builder)
size_t size() const
Definition: Function.h:799
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:341
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:438
const_iterator begin() const
Definition: IntervalMap.h:1146
void insert(KeyT a, KeyT b, ValT y)
insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
Definition: IntervalMap.h:1129
void clear()
clear - Remove all entries.
Definition: IntervalMap.h:1330
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1504
void push_back(MachineInstr *MI)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Root of the metadata hierarchy.
Definition: Metadata.h:62
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:275
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1743
Lightweight class that wraps the location operand metadata of a debug intrinsic.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
static IntegerType * getInt1Ty(LLVMContext &C)
UniqueVector - This class produces a sequential ID number (base 1) for each unique entry that is adde...
Definition: UniqueVector.h:24
unsigned insert(const T &Entry)
insert - Append entry to the vector if it doesn't already exist.
Definition: UniqueVector.h:40
size_t size() const
size - Returns the number of entries in the vector.
Definition: UniqueVector.h:87
iterator end()
Return an iterator to the end of the vector.
Definition: UniqueVector.h:81
iterator begin()
Return an iterator to the start of the vector.
Definition: UniqueVector.h:75
static ValueAsMetadata * get(Value *V)
Definition: Metadata.cpp:454
LLVM Value Representation.
Definition: Value.h:74
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Tail
Attemps to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
void deleteAll(Function *F)
Remove all Assignment Tracking related intrinsics and metadata from F.
Definition: DebugInfo.cpp:1789
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
Definition: DebugInfo.cpp:1748
std::optional< AssignmentInfo > getAssignmentInfo(const DataLayout &DL, const MemIntrinsic *I)
Definition: DebugInfo.cpp:1961
bool calculateFragmentIntersect(const DataLayout &DL, const Value *Dest, uint64_t SliceOffsetInBits, uint64_t SliceSizeInBits, const DbgAssignIntrinsic *DAI, std::optional< DIExpression::FragmentInfo > &Result)
Calculate the fragment of the variable in DAI covered from (Dest + SliceOffsetInBits) to to (Dest + S...
Definition: DebugInfo.cpp:1803
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
std::optional< const char * > toString(const std::optional< DWARFFormValue > &V)
Take an optional DWARFFormValue and try to extract a string value from it.
@ DW_OP_LLVM_fragment
Only used in LLVM metadata.
Definition: Dwarf.h:141
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
@ Offset
Definition: DWP.cpp:440
Interval::succ_iterator succ_end(Interval *I)
Definition: Interval.h:102
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1726
bool operator!=(uint64_t V1, const APInt &V2)
Definition: APInt.h:2036
Interval::succ_iterator succ_begin(Interval *I)
succ_begin/succ_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:99
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1733
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isFunctionInPrintList(StringRef FunctionName)
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
VariableID
Type wrapper for integer ID for Variables. 0 is reserved.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
Definition: DebugInfo.cpp:2215
bool debuginfoShouldUseDebugInstrRef(const Triple &T)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
Holds the characteristics of one fragment of a larger variable.
uint64_t endInBits() const
Return the index of the bit after the end of the fragment, e.g.
uint64_t startInBits() const
Return the index of the first bit of the fragment.
static bool isEqual(const VariableID &LHS, const VariableID &RHS)
static unsigned getHashValue(const VariableID &Val)
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:50
Variable location definition used by FunctionVarLocs.
RawLocationWrapper Values