//===-- MemoryProfileInfo.cpp - memory profile info ------------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Format.h"

using namespace llvm;
using namespace llvm::memprof;

#define DEBUG_TYPE "memory-profile-info"

namespace llvm {

cl::opt<bool> MemProfReportHintedSizes(
    "memprof-report-hinted-sizes", cl::init(false), cl::Hidden,
    cl::desc("Report total allocation sizes of hinted allocations"));

// This is useful if we have enabled reporting of hinted sizes, and want to get
// information from the indexing step for all contexts (especially for
// testing), or have specified a value less than 100% for
// -memprof-cloning-cold-threshold.
LLVM_ABI cl::opt<bool> MemProfKeepAllNotColdContexts(
    "memprof-keep-all-not-cold-contexts", cl::init(false), cl::Hidden,
    cl::desc("Keep all non-cold contexts (increases cloning overheads)"));

cl::opt<unsigned> MinClonedColdBytePercent(
    "memprof-cloning-cold-threshold", cl::init(100), cl::Hidden,
    cl::desc("Min percent of cold bytes to hint alloc cold during cloning"));

// Discard non-cold contexts if they overlap with much larger cold contexts,
// specifically, if all contexts reaching a given callsite are at least this
// percent cold byte allocations. This reduces the amount of cloning required
// to expose the cold contexts when they greatly dominate non-cold contexts.
cl::opt<unsigned> MinCallsiteColdBytePercent(
    "memprof-callsite-cold-threshold", cl::init(100), cl::Hidden,
    cl::desc("Min percent of cold bytes at a callsite to discard non-cold "
             "contexts"));

// Enable saving context size information for largest cold contexts, which can
// be used to flag contexts for more aggressive cloning and reporting.
cl::opt<unsigned> MinPercentMaxColdSize(
    "memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
    cl::desc("Min percent of max cold bytes for critical cold context"));

LLVM_ABI cl::opt<bool> MemProfUseAmbiguousAttributes(
    "memprof-ambiguous-attributes", cl::init(true), cl::Hidden,
    cl::desc("Apply ambiguous memprof attribute to ambiguous allocations"));

} // end namespace llvm
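
// These options are typically passed to opt along with a MemProf profile,
// e.g. (illustrative invocation; file names are hypothetical):
//   opt -passes='memprof-use<profile-filename=foo.memprofdata>' \
//       -memprof-report-hinted-sizes foo.bc -o foo_hinted.bc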

bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
  return MemProfReportHintedSizes || MinClonedColdBytePercent < 100;
}

bool llvm::memprof::metadataMayIncludeContextSizeInfo() {
  return metadataIncludesAllContextSizeInfo() || MinPercentMaxColdSize < 100;
}

bool llvm::memprof::recordContextSizeInfoForAnalysis() {
  return metadataMayIncludeContextSizeInfo() ||
         MinCallsiteColdBytePercent < 100;
}

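// For example (illustrative ids), a CallStack of {1, 2, 3} produces a tuple
// of i64 constants, printed in textual IR as:
//   !0 = !{i64 1, i64 2, i64 3}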
MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
                                              LLVMContext &Ctx) {
  SmallVector<Metadata *> StackVals;
  StackVals.reserve(CallStack.size());
  for (auto Id : CallStack) {
    auto *StackValMD =
        ValueAsMetadata::get(ConstantInt::get(Type::getInt64Ty(Ctx), Id));
    StackVals.push_back(StackValMD);
  }
  return MDNode::get(Ctx, StackVals);
}

MDNode *llvm::memprof::getMIBStackNode(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The stack metadata is the first operand of each memprof MIB metadata.
  return cast<MDNode>(MIB->getOperand(0));
}

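// For reference, an MIB node as accessed by getMIBStackNode above and
// getMIBAllocType below has the following shape (illustrative ids and sizes):
//   !1 = !{!2, !"cold", !3}  ; stack node, alloc type, optional size info
//   !2 = !{i64 1, i64 2}     ; call stack ids, allocation frame first
//   !3 = !{i64 123, i64 456} ; optional {FullStackId, TotalSize} pair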
AllocationType llvm::memprof::getMIBAllocType(const MDNode *MIB) {
  assert(MIB->getNumOperands() >= 2);
  // The allocation type is currently the second operand of each memprof
  // MIB metadata. This will need to change as we add additional allocation
  // types that can be applied based on the allocation profile data.
  auto *MDS = dyn_cast<MDString>(MIB->getOperand(1));
  assert(MDS);
  if (MDS->getString() == "cold") {
    return AllocationType::Cold;
  } else if (MDS->getString() == "hot") {
    return AllocationType::Hot;
  }
  return AllocationType::NotCold;
}

std::string llvm::memprof::getAllocTypeAttributeString(AllocationType Type) {
  switch (Type) {
  case AllocationType::NotCold:
    return "notcold";
    break;
  case AllocationType::Cold:
    return "cold";
    break;
  case AllocationType::Hot:
    return "hot";
    break;
  default:
    assert(false && "Unexpected alloc type");
  }
  llvm_unreachable("invalid alloc type");
}
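// The returned string becomes the value of the "memprof" attribute on the
// allocation call site, e.g. (illustrative):
//   %call = call ptr @malloc(i64 8) #1
//   attributes #1 = { "memprof"="cold" }
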
bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) {
  const unsigned NumAllocTypes = llvm::popcount(AllocTypes);
  assert(NumAllocTypes != 0);
  return NumAllocTypes == 1;
}

void llvm::memprof::removeAnyExistingAmbiguousAttribute(CallBase *CB) {
  if (!CB->hasFnAttr("memprof"))
    return;
  assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
  CB->removeFnAttr("memprof");
}

void llvm::memprof::addAmbiguousAttribute(CallBase *CB) {
  if (!MemProfUseAmbiguousAttributes)
    return;
  // We may have an existing ambiguous attribute if we are reanalyzing
  // after inlining.
  if (CB->hasFnAttr("memprof")) {
    assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous");
  } else {
    auto A = llvm::Attribute::get(CB->getContext(), "memprof", "ambiguous");
    CB->addFnAttr(A);
  }
}

void CallStackTrie::addCallStack(
    AllocationType AllocType, ArrayRef<uint64_t> StackIds,
    std::vector<ContextTotalSize> ContextSizeInfo) {
  bool First = true;
  CallStackTrieNode *Curr = nullptr;
  for (auto StackId : StackIds) {
    // If this is the first stack frame, add or update alloc node.
    if (First) {
      First = false;
      if (Alloc) {
        assert(AllocStackId == StackId);
        Alloc->addAllocType(AllocType);
      } else {
        AllocStackId = StackId;
        Alloc = new CallStackTrieNode(AllocType);
      }
      Curr = Alloc;
      continue;
    }
    // Update existing caller node if it exists.
    auto [Next, Inserted] = Curr->Callers.try_emplace(StackId);
    if (!Inserted) {
      Curr = Next->second;
      Curr->addAllocType(AllocType);
      continue;
    }
    // Otherwise add a new caller node.
    auto *New = new CallStackTrieNode(AllocType);
    Next->second = New;
    Curr = New;
  }
  assert(Curr);
  llvm::append_range(Curr->ContextSizeInfo, ContextSizeInfo);
}

void CallStackTrie::addCallStack(MDNode *MIB) {
  // Note that we are building this from existing MD_memprof metadata.
  BuiltFromExistingMetadata = true;
  MDNode *StackMD = getMIBStackNode(MIB);
  assert(StackMD);
  std::vector<uint64_t> CallStack;
  CallStack.reserve(StackMD->getNumOperands());
  for (const auto &MIBStackIter : StackMD->operands()) {
    auto *StackId = mdconst::dyn_extract<ConstantInt>(MIBStackIter);
    assert(StackId);
    CallStack.push_back(StackId->getZExtValue());
  }
  std::vector<ContextTotalSize> ContextSizeInfo;
  // Collect the context size information if it exists.
  if (MIB->getNumOperands() > 2) {
    for (unsigned I = 2; I < MIB->getNumOperands(); I++) {
      MDNode *ContextSizePair = dyn_cast<MDNode>(MIB->getOperand(I));
      assert(ContextSizePair->getNumOperands() == 2);
      uint64_t FullStackId =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(0))
              ->getZExtValue();
      uint64_t TotalSize =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(1))
              ->getZExtValue();
      ContextSizeInfo.push_back({FullStackId, TotalSize});
    }
  }
  addCallStack(getMIBAllocType(MIB), CallStack, std::move(ContextSizeInfo));
}

static MDNode *createMIBNode(LLVMContext &Ctx, ArrayRef<uint64_t> MIBCallStack,
                             AllocationType AllocType,
                             ArrayRef<ContextTotalSize> ContextSizeInfo,
                             const uint64_t MaxColdSize,
                             bool BuiltFromExistingMetadata,
                             uint64_t &TotalBytes, uint64_t &ColdBytes) {
  SmallVector<Metadata *> MIBPayload(
      {buildCallstackMetadata(MIBCallStack, Ctx)});
  MIBPayload.push_back(
      MDString::get(Ctx, getAllocTypeAttributeString(AllocType)));

  if (ContextSizeInfo.empty()) {
    // The profile matcher should have provided context size info if
    // MinCallsiteColdBytePercent < 100. Here we check >= 100 to gracefully
    // handle a user-provided percent larger than 100. However, we may not have
    // this information if we built the Trie from existing MD_memprof metadata.
    assert(BuiltFromExistingMetadata || MinCallsiteColdBytePercent >= 100);
    return MDNode::get(Ctx, MIBPayload);
  }

  for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
    TotalBytes += TotalSize;
    bool LargeColdContext = false;
    if (AllocType == AllocationType::Cold) {
      ColdBytes += TotalSize;
      // If we have the max cold context size from summary information and have
      // requested identification of contexts above a percentage of the max,
      // see if this context qualifies. We should assume this is large if we
      // rebuilt the trie from existing metadata (i.e. to update after
      // inlining), in which case we don't have a MaxColdSize from the profile;
      // we assume any context size info in existence on the metadata should be
      // propagated.
      if (BuiltFromExistingMetadata ||
          (MaxColdSize > 0 && MinPercentMaxColdSize < 100 &&
           TotalSize * 100 >= MaxColdSize * MinPercentMaxColdSize))
        LargeColdContext = true;
    }
    // Only add the context size info as metadata if we need it in the thin
    // link (currently if reporting of hinted sizes is enabled, we have
    // specified a threshold for marking allocations cold after cloning, or we
    // have identified this as a large cold context of interest above).
    if (metadataIncludesAllContextSizeInfo() || LargeColdContext) {
      auto *FullStackIdMD = ValueAsMetadata::get(
          ConstantInt::get(Type::getInt64Ty(Ctx), FullStackId));
      auto *TotalSizeMD = ValueAsMetadata::get(
          ConstantInt::get(Type::getInt64Ty(Ctx), TotalSize));
      auto *ContextSizeMD = MDNode::get(Ctx, {FullStackIdMD, TotalSizeMD});
      MIBPayload.push_back(ContextSizeMD);
    }
  }
  assert(TotalBytes > 0);
  return MDNode::get(Ctx, MIBPayload);
}

void CallStackTrie::collectContextSizeInfo(
    CallStackTrieNode *Node, std::vector<ContextTotalSize> &ContextSizeInfo) {
  llvm::append_range(ContextSizeInfo, Node->ContextSizeInfo);
  for (auto &Caller : Node->Callers)
    collectContextSizeInfo(Caller.second, ContextSizeInfo);
}

void CallStackTrie::convertHotToNotCold(CallStackTrieNode *Node) {
  if (Node->hasAllocType(AllocationType::Hot)) {
    Node->removeAllocType(AllocationType::Hot);
    Node->addAllocType(AllocationType::NotCold);
  }
  for (auto &Caller : Node->Callers)
    convertHotToNotCold(Caller.second);
}

// Copy over some or all of NewMIBNodes to the SavedMIBNodes vector, depending
// on options that enable filtering out some NotCold contexts.
static void saveFilteredNewMIBNodes(std::vector<Metadata *> &NewMIBNodes,
                                    std::vector<Metadata *> &SavedMIBNodes,
                                    unsigned CallerContextLength,
                                    uint64_t TotalBytes, uint64_t ColdBytes,
                                    bool BuiltFromExistingMetadata) {
  const bool MostlyCold =
      // If we have built the Trie from existing MD_memprof metadata, we may or
      // may not have context size information (in which case ColdBytes and
      // TotalBytes are 0, a case also guarded against by the ColdBytes > 0
      // check below). Even if we do have some context size information from
      // the metadata, we have already gone through a round of discarding of
      // small non-cold contexts during matching, and it would be overly
      // aggressive to do it again, and we also want to maintain the same
      // behavior with and without reporting of hinted bytes enabled.
      !BuiltFromExistingMetadata && MinCallsiteColdBytePercent < 100 &&
      ColdBytes > 0 &&
      ColdBytes * 100 >= MinCallsiteColdBytePercent * TotalBytes;

  // In the simplest case, with pruning disabled, keep all the new MIB nodes.
  if (MemProfKeepAllNotColdContexts && !MostlyCold) {
    append_range(SavedMIBNodes, NewMIBNodes);
    return;
  }

  auto EmitMessageForRemovedContexts = [](const MDNode *MIBMD, StringRef Tag,
                                          StringRef Extra) {
    assert(MIBMD->getNumOperands() > 2);
    for (unsigned I = 2; I < MIBMD->getNumOperands(); I++) {
      MDNode *ContextSizePair = dyn_cast<MDNode>(MIBMD->getOperand(I));
      assert(ContextSizePair->getNumOperands() == 2);
      uint64_t FullStackId =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(0))
              ->getZExtValue();
      uint64_t TS =
          mdconst::dyn_extract<ConstantInt>(ContextSizePair->getOperand(1))
              ->getZExtValue();
      errs() << "MemProf hinting: Total size for " << Tag
             << " non-cold full allocation context hash " << FullStackId
             << Extra << ": " << TS << "\n";
    }
  };

  // If the cold bytes at the current callsite exceed the given threshold, we
  // discard all non-cold contexts so we do not need any of the later pruning
  // handling. We can simply copy over all the cold contexts and return early.
  if (MostlyCold) {
    auto NewColdMIBNodes =
        make_filter_range(NewMIBNodes, [&](const Metadata *M) {
          auto MIBMD = cast<MDNode>(M);
          // Only append cold contexts.
          if (getMIBAllocType(MIBMD) == AllocationType::Cold)
            return true;
          if (MemProfReportHintedSizes) {
            const float PercentCold = ColdBytes * 100.0 / TotalBytes;
            std::string PercentStr;
            llvm::raw_string_ostream OS(PercentStr);
            OS << format(" for %5.2f%% cold bytes", PercentCold);
            EmitMessageForRemovedContexts(MIBMD, "discarded", OS.str());
          }
          return false;
        });
    for (auto *M : NewColdMIBNodes)
      SavedMIBNodes.push_back(M);
    return;
  }

  // Prune unneeded NotCold contexts, taking advantage of the fact
  // that we later will only clone Cold contexts, as NotCold is the allocation
  // default. We only need to keep as metadata the NotCold contexts that
  // overlap the longest with Cold allocations, so that we know how deeply we
  // need to clone. For example, assume we add the following contexts to the
  // trie:
  //      1 3 (notcold)
  //      1 2 4 (cold)
  //      1 2 5 (notcold)
  //      1 2 6 (notcold)
  // the trie looks like:
  //         1
  //        / \
  //       2   3
  //      /|\
  //     4 5 6
  //
  // It is sufficient to prune all but one not-cold context (either 1,2,5 or
  // 1,2,6; we arbitrarily keep the first one we encounter, which will be
  // 1,2,5).
  //
  // To do this pruning, we first check if there were any not-cold
  // contexts kept for a deeper caller, which will have a context length larger
  // than the CallerContextLength being handled here (i.e. kept by a deeper
  // recursion step). If so, none of the not-cold MIB nodes added for the
  // immediate callers need to be kept. If not, we keep the first (created
  // for the immediate caller) not-cold MIB node.
  bool LongerNotColdContextKept = false;
  for (auto *MIB : NewMIBNodes) {
    auto MIBMD = cast<MDNode>(MIB);
    if (getMIBAllocType(MIBMD) == AllocationType::Cold)
      continue;
    MDNode *StackMD = getMIBStackNode(MIBMD);
    assert(StackMD);
    if (StackMD->getNumOperands() > CallerContextLength) {
      LongerNotColdContextKept = true;
      break;
    }
  }
  // Don't need to emit any for the immediate caller if we already have
  // longer overlapping contexts.
  bool KeepFirstNewNotCold = !LongerNotColdContextKept;
  auto NewColdMIBNodes = make_filter_range(NewMIBNodes, [&](const Metadata *M) {
    auto MIBMD = cast<MDNode>(M);
    // Only keep cold contexts and the first (longest) non-cold context.
    if (getMIBAllocType(MIBMD) != AllocationType::Cold) {
      MDNode *StackMD = getMIBStackNode(MIBMD);
      assert(StackMD);
      // Keep any already kept for longer contexts.
      if (StackMD->getNumOperands() > CallerContextLength)
        return true;
      // Otherwise keep the first one added by the immediate caller if there
      // were no longer contexts.
      if (KeepFirstNewNotCold) {
        KeepFirstNewNotCold = false;
        return true;
      }
      if (MemProfReportHintedSizes)
        EmitMessageForRemovedContexts(MIBMD, "pruned", "");
      return false;
    }
    return true;
  });
  for (auto *M : NewColdMIBNodes)
    SavedMIBNodes.push_back(M);
}

// Recursive helper to trim contexts and create metadata nodes.
// Caller should have pushed Node's loc to MIBCallStack. Doing this in the
// caller makes it simpler to handle the many early returns in this method.
// Updates the total and cold profiled bytes in the subtrie rooted at this
// node.
bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                                  std::vector<uint64_t> &MIBCallStack,
                                  std::vector<Metadata *> &MIBNodes,
                                  bool CalleeHasAmbiguousCallerContext,
                                  uint64_t &TotalBytes, uint64_t &ColdBytes) {
  // Trim context below the first node in a prefix with a single alloc type.
  // Add an MIB record for the current call stack prefix.
  if (hasSingleAllocType(Node->AllocTypes)) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Node, ContextSizeInfo);
    MIBNodes.push_back(createMIBNode(
        Ctx, MIBCallStack, (AllocationType)Node->AllocTypes, ContextSizeInfo,
        MaxColdSize, BuiltFromExistingMetadata, TotalBytes, ColdBytes));
    return true;
  }

  // We don't have a single allocation for all the contexts sharing this
  // prefix, so recursively descend into callers in trie.
  if (!Node->Callers.empty()) {
    bool NodeHasAmbiguousCallerContext = Node->Callers.size() > 1;
    bool AddedMIBNodesForAllCallerContexts = true;
    // Accumulate all new MIB nodes by the recursive calls below into a vector
    // that will later be filtered before adding to the caller's MIBNodes
    // vector.
    std::vector<Metadata *> NewMIBNodes;
    // Determine the total and cold byte counts for all callers, then add to
    // the caller's counts further below.
    uint64_t CallerTotalBytes = 0;
    uint64_t CallerColdBytes = 0;
    for (auto &Caller : Node->Callers) {
      MIBCallStack.push_back(Caller.first);
      AddedMIBNodesForAllCallerContexts &= buildMIBNodes(
          Caller.second, Ctx, MIBCallStack, NewMIBNodes,
          NodeHasAmbiguousCallerContext, CallerTotalBytes, CallerColdBytes);
      // Remove Caller.
      MIBCallStack.pop_back();
    }
    // Pass in the stack length of the MIB nodes added for the immediate
    // caller, which is the current stack length plus 1.
    saveFilteredNewMIBNodes(NewMIBNodes, MIBNodes, MIBCallStack.size() + 1,
                            CallerTotalBytes, CallerColdBytes,
                            BuiltFromExistingMetadata);
    TotalBytes += CallerTotalBytes;
    ColdBytes += CallerColdBytes;

    if (AddedMIBNodesForAllCallerContexts)
      return true;
    // We expect that the callers should be forced to add MIBs to disambiguate
    // the context in this case (see below).
    assert(!NodeHasAmbiguousCallerContext);
  }

  // If we reached here, then this node does not have a single allocation type,
  // and we didn't add metadata for a longer call stack prefix including any of
  // Node's callers. That means we never hit a single allocation type along all
  // call stacks with this prefix. This can happen due to recursion collapsing
  // or the stack being deeper than tracked by the profiler runtime, leading to
  // contexts with different allocation types being merged. In that case, we
  // trim the context just below the deepest context split, which is this
  // node if the callee has an ambiguous caller context (multiple callers),
  // since the recursive calls above returned false. Conservatively give it
  // non-cold allocation type.
  if (!CalleeHasAmbiguousCallerContext)
    return false;
  std::vector<ContextTotalSize> ContextSizeInfo;
  collectContextSizeInfo(Node, ContextSizeInfo);
  MIBNodes.push_back(createMIBNode(
      Ctx, MIBCallStack, AllocationType::NotCold, ContextSizeInfo, MaxColdSize,
      BuiltFromExistingMetadata, TotalBytes, ColdBytes));
  return true;
}

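// Worked example for buildMIBNodes (hypothetical ids): given the contexts
// from the saveFilteredNewMIBNodes comment above (1,3 notcold; 1,2,4 cold;
// 1,2,5 notcold; 1,2,6 notcold) and default pruning options, the resulting
// metadata has MIB nodes only for {1,2,4} (cold) and {1,2,5} (notcold);
// {1,2,6} and {1,3} are pruned because cloning only needs the non-cold
// context that overlaps longest with a cold context.
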
void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT,
                                                StringRef Descriptor) {
  auto AllocTypeString = getAllocTypeAttributeString(AT);
  auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString);
  // After inlining we may be able to convert an existing ambiguous allocation
  // to an unambiguous one.
  removeAnyExistingAmbiguousAttribute(CI);
  CI->addFnAttr(A);
  if (MemProfReportHintedSizes) {
    std::vector<ContextTotalSize> ContextSizeInfo;
    collectContextSizeInfo(Alloc, ContextSizeInfo);
    for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
      errs() << "MemProf hinting: Total size for full allocation context hash "
             << FullStackId << " and " << Descriptor << " alloc type "
             << getAllocTypeAttributeString(AT) << ": " << TotalSize << "\n";
    }
  }
  if (ORE)
    ORE->emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CI)
              << ore::NV("AllocationCall", CI) << " in function "
              << ore::NV("Caller", CI->getFunction())
              << " marked with memprof allocation attribute "
              << ore::NV("Attribute", AllocTypeString));
}

// Build and attach the minimal necessary MIB metadata. If the alloc has a
// single allocation type, add a function attribute instead. Returns true if
// memprof metadata attached, false if not (attribute added).
bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) {
  if (hasSingleAllocType(Alloc->AllocTypes)) {
    addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                "single");
    return false;
  }
  // If there were any hot allocation contexts, the Alloc trie node would have
  // the Hot type set. If so, because we don't currently support cloning for
  // hot contexts, they should be converted to NotCold. This happens in the
  // cloning support anyway, however, doing this now enables more aggressive
  // context trimming when building the MIB metadata (and possibly may make the
  // allocation have a single NotCold allocation type), greatly reducing
  // overheads in bitcode, cloning memory and cloning time.
  if (Alloc->hasAllocType(AllocationType::Hot)) {
    convertHotToNotCold(Alloc);
    // Check whether we now have a single alloc type.
    if (hasSingleAllocType(Alloc->AllocTypes)) {
      addSingleAllocTypeAttribute(CI, (AllocationType)Alloc->AllocTypes,
                                  "single");
      return false;
    }
  }
  auto &Ctx = CI->getContext();
  std::vector<uint64_t> MIBCallStack;
  MIBCallStack.push_back(AllocStackId);
  std::vector<Metadata *> MIBNodes;
  uint64_t TotalBytes = 0;
  uint64_t ColdBytes = 0;
  assert(!Alloc->Callers.empty() && "addCallStack has not been called yet");
  // The CalleeHasAmbiguousCallerContext flag is meant to say whether the
  // callee of the given node has more than one caller. Here the node being
  // passed in is the alloc and it has no callees. So it's false.
  if (buildMIBNodes(Alloc, Ctx, MIBCallStack, MIBNodes,
                    /*CalleeHasAmbiguousCallerContext=*/false, TotalBytes,
                    ColdBytes)) {
    assert(MIBCallStack.size() == 1 &&
           "Should only be left with Alloc's location in stack");
    CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes));
    addAmbiguousAttribute(CI);
    return true;
  }
  // In the corner case where the CallStackTrie consists of a single chain to
  // a leaf and every node in the chain has multiple alloc types, conservatively
  // give the allocation the non-cold type.
  // FIXME: Avoid this case before the memory profile is created.
  // Alternatively, select the hint based on the fraction of cold bytes.
  addSingleAllocTypeAttribute(CI, AllocationType::NotCold,
                              "indistinguishable");
  return false;
}

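// Example end-to-end usage (hypothetical ids; in practice this is driven by
// the MemProf profile matcher):
//   CallStackTrie Trie;
//   Trie.addCallStack(AllocationType::Cold, {1, 2, 3});
//   Trie.addCallStack(AllocationType::NotCold, {1, 2, 4});
//   bool AttachedMD = Trie.buildAndAttachMIBMetadata(CI);
// Because the contexts share prefix {1, 2} with different alloc types, this
// attaches !memprof metadata with one MIB node per context and marks CI with
// the "ambiguous" attribute; if all contexts had been cold, it would instead
// have added a "memprof"="cold" attribute and returned false.
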
template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End)
    : N(N) {
  if (!N)
    return;
  Iter = End ? N->op_end() : N->op_begin();
}

template <>
uint64_t
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*() {
  assert(Iter != N->op_end());
  auto *StackIdCInt = mdconst::dyn_extract<ConstantInt>(*Iter);
  assert(StackIdCInt);
  return StackIdCInt->getZExtValue();
}

template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const {
  assert(N);
  return mdconst::dyn_extract<ConstantInt>(N->operands().back())
      ->getZExtValue();
}

MDNode *MDNode::getMergedMemProfMetadata(MDNode *A, MDNode *B) {
  // TODO: Support more sophisticated merging, such as selecting the one with
  // more bytes allocated, or implement support for carrying multiple
  // allocation leaf contexts. For now, keep the first one.
  if (A)
    return A;
  return B;
}

MDNode *MDNode::getMergedCallsiteMetadata(MDNode *A, MDNode *B) {
  // TODO: Support more sophisticated merging, which will require support for
  // carrying multiple contexts. For now, keep the first one.
  if (A)
    return A;
  return B;
}