LLVM 19.0.0git
InstrProfWriter.cpp
Go to the documentation of this file.
1//===- InstrProfWriter.cpp - Instrumented profiling writer ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains support for writing profiling data for clang's
10// instrumentation based PGO and coverage.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/SetVector.h"
17#include "llvm/ADT/StringRef.h"
23#include "llvm/Support/Endian.h"
25#include "llvm/Support/Error.h"
30#include <cstdint>
31#include <memory>
32#include <string>
33#include <tuple>
34#include <utility>
35#include <vector>
36
37using namespace llvm;
38
39// A struct to define how the data stream should be patched. For Indexed
40// profiling, only uint64_t data type is needed.
// NOTE(review): \c D is a non-owning pointer; the pointed-to array must stay
// alive until ProfOStream::patch has consumed this item.
41struct PatchItem {
42 uint64_t Pos; // Where to patch.
43 uint64_t *D; // Pointer to an array of source data.
44 int N; // Number of elements in \c D array.
45};
46
47namespace llvm {
48
49// A wrapper class to abstract writer stream with support of bytes
50// back patching.
// NOTE(review): this extract has dropped several declaration lines here
// (the class header and the two constructor signatures taking a
// raw_fd_ostream and a raw_string_ostream respectively).
52public:
54 : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}
56 : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}
57
// Current absolute write position in the underlying stream.
58 uint64_t tell() { return OS.tell(); }
// All multi-byte writes go through the little-endian writer \c LE.
59 void write(uint64_t V) { LE.write<uint64_t>(V); }
60 void writeByte(uint8_t V) { LE.write<uint8_t>(V); }
61
62 // \c patch can only be called when all data is written and flushed.
63 // For raw_string_ostream, the patch is done on the target string
64 // directly and it won't be reflected in the stream's internal buffer.
66 using namespace support;
67
68 if (IsFDOStream) {
69 raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
// Remember where we were so the write position can be restored afterwards.
70 const uint64_t LastPos = FDOStream.tell();
71 for (const auto &K : P) {
72 FDOStream.seek(K.Pos);
73 for (int I = 0; I < K.N; I++)
74 write(K.D[I]);
75 }
76 // Reset the stream to the last position after patching so that users
77 // don't accidentally overwrite data. This makes it consistent with
78 // the string stream below which replaces the data directly.
79 FDOStream.seek(LastPos);
80 } else {
81 raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
82 std::string &Data = SOStream.str(); // with flush
83 for (const auto &K : P) {
84 for (int I = 0; I < K.N; I++) {
// Each patched word is byte-swapped to little-endian before being spliced
// into the backing string in place.
85 uint64_t Bytes =
86 endian::byte_swap<uint64_t, llvm::endianness::little>(K.D[I]);
87 Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t),
88 (const char *)&Bytes, sizeof(uint64_t));
89 }
90 }
91 }
92 }
93
94 // If \c OS is an instance of \c raw_fd_ostream, this field will be
95 // true. Otherwise, \c OS will be an raw_string_ostream.
99};
100
// On-disk chained hash table writer trait for the indexed-profile function
// table: keys are function-name strings, values are per-function
// hash -> InstrProfRecord maps.
// NOTE(review): this extract has dropped the class header, typedef lines,
// member declarations (endianness, summary builder pointers), and the
// ComputeHash / method signature lines; the numbered gaps below mark them.
102public:
105
108
111
115
117
120 }
121
122 static std::pair<offset_type, offset_type>
// EmitKeyDataLength: write the key length N, then compute and write the
// total serialized byte size M of the value — per record: the function
// hash, the counter vector plus its size, the bitmap vector plus its size,
// and the value-profile payload.
124 using namespace support;
125
127
128 offset_type N = K.size();
129 LE.write<offset_type>(N);
130
131 offset_type M = 0;
132 for (const auto &ProfileData : *V) {
133 const InstrProfRecord &ProfRecord = ProfileData.second;
134 M += sizeof(uint64_t); // The function hash
135 M += sizeof(uint64_t); // The size of the Counts vector
136 M += ProfRecord.Counts.size() * sizeof(uint64_t);
137 M += sizeof(uint64_t); // The size of the Bitmap vector
138 M += ProfRecord.BitmapBytes.size() * sizeof(uint64_t);
139
140 // Value data
141 M += ValueProfData::getSize(ProfileData.second);
142 }
143 LE.write<offset_type>(M);
144
145 return std::make_pair(N, M);
146 }
147
// EmitKey: the key is emitted as its raw bytes.
149 Out.write(K.data(), N);
150 }
151
// EmitData: serialize each (hash, record) pair, feeding every record into
// the matching (CS or non-CS) summary builder as a side effect.
153 using namespace support;
154
156 for (const auto &ProfileData : *V) {
157 const InstrProfRecord &ProfRecord = ProfileData.second;
158 if (NamedInstrProfRecord::hasCSFlagInHash(ProfileData.first))
159 CSSummaryBuilder->addRecord(ProfRecord);
160 else
161 SummaryBuilder->addRecord(ProfRecord);
162
163 LE.write<uint64_t>(ProfileData.first); // Function hash
164 LE.write<uint64_t>(ProfRecord.Counts.size());
165 for (uint64_t I : ProfRecord.Counts)
166 LE.write<uint64_t>(I);
167
168 LE.write<uint64_t>(ProfRecord.BitmapBytes.size());
169 for (uint64_t I : ProfRecord.BitmapBytes)
170 LE.write<uint64_t>(I);
171
172 // Write value data
173 std::unique_ptr<ValueProfData> VDataPtr =
174 ValueProfData::serializeFrom(ProfileData.second);
175 uint32_t S = VDataPtr->getSize();
176 VDataPtr->swapBytesFromHost(ValueProfDataEndianness);
177 Out.write((const char *)VDataPtr.get(), S);
178 }
179 }
180};
181
182} // end namespace llvm
183
// InstrProfWriter constructor (the signature's first line is missing from
// this extract): stores the sparse-encoding flag, temporal-trace reservoir
// limits, version options, and allocates the record writer trait.
185 bool Sparse, uint64_t TemporalProfTraceReservoirSize,
186 uint64_t MaxTemporalProfTraceLength, bool WritePrevVersion,
187 memprof::IndexedVersion MemProfVersionRequested)
188 : Sparse(Sparse), MaxTemporalProfTraceLength(MaxTemporalProfTraceLength),
189 TemporalProfTraceReservoirSize(TemporalProfTraceReservoirSize),
190 InfoObj(new InstrProfRecordWriterTrait()),
191 WritePrevVersion(WritePrevVersion),
192 MemProfVersionRequested(MemProfVersionRequested) {}
193
195
196// Internal interface for testing purpose only.
// Sets the endianness used when serializing value-profile data (signature
// line missing from this extract).
198 InfoObj->ValueProfDataEndianness = Endianness;
199}
200
// setOutputSparse (signature line missing): toggles sparse encoding of
// function records; see shouldEncodeData.
202 this->Sparse = Sparse;
203}
204
// addRecord(NamedInstrProfRecord, Weight, Warn) convenience overload
// (signature's first line missing from this extract): splits out the name
// and hash, then forwards to the (Name, Hash) overload.
206 function_ref<void(Error)> Warn) {
207 auto Name = I.Name;
208 auto Hash = I.Hash;
209 addRecord(Name, Hash, std::move(I), Weight, Warn);
210}
211
// overlapRecord (signature's first line missing from this extract):
// accumulates overlap statistics between the incoming record Other and the
// record with the same name/hash already held in FunctionData.
213 OverlapStats &Overlap,
214 OverlapStats &FuncLevelOverlap,
215 const OverlapFuncFilters &FuncFilter) {
216 auto Name = Other.Name;
217 auto Hash = Other.Hash;
218 Other.accumulateCounts(FuncLevelOverlap.Test);
// No function with this name in the base profile: count it as unique.
219 if (!FunctionData.contains(Name)) {
220 Overlap.addOneUnique(FuncLevelOverlap.Test);
221 return;
222 }
// A record whose count sum is below 1 only bumps the entry counter.
223 if (FuncLevelOverlap.Test.CountSum < 1.0f) {
224 Overlap.Overlap.NumEntries += 1;
225 return;
226 }
227 auto &ProfileDataMap = FunctionData[Name];
228 bool NewFunc;
230 std::tie(Where, NewFunc) =
231 ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
// Same name but no entry for this hash: a hash mismatch, not an overlap.
232 if (NewFunc) {
233 Overlap.addOneMismatch(FuncLevelOverlap.Test);
234 return;
235 }
236 InstrProfRecord &Dest = Where->second;
237
// A matching name filter disables the value cutoff for this function.
238 uint64_t ValueCutoff = FuncFilter.ValueCutoff;
239 if (!FuncFilter.NameFilter.empty() && Name.contains(FuncFilter.NameFilter))
240 ValueCutoff = 0;
241
242 Dest.overlap(Other, Overlap, FuncLevelOverlap, ValueCutoff);
243}
244
// addRecord(Name, Hash, Record, Weight, Warn) core implementation
// (signature's first line missing from this extract): inserts a new record
// or merges into the existing one for (Name, Hash), scaling/merging by
// Weight and reporting soft errors through Warn.
246 InstrProfRecord &&I, uint64_t Weight,
247 function_ref<void(Error)> Warn) {
248 auto &ProfileDataMap = FunctionData[Name];
249
250 bool NewFunc;
252 std::tie(Where, NewFunc) =
253 ProfileDataMap.insert(std::make_pair(Hash, InstrProfRecord()));
254 InstrProfRecord &Dest = Where->second;
255
// Adapt instrprof_error values into the caller-provided Error callback.
256 auto MapWarn = [&](instrprof_error E) {
257 Warn(make_error<InstrProfError>(E));
258 };
259
260 if (NewFunc) {
261 // We've never seen a function with this name and hash, add it.
262 Dest = std::move(I);
263 if (Weight > 1)
264 Dest.scale(Weight, 1, MapWarn);
265 } else {
266 // We're updating a function we've seen before.
267 Dest.merge(I, Weight, MapWarn);
268 }
269
270 Dest.sortValueData();
271}
272
// addMemProfRecord (signature missing from this extract): insert the memprof
// record keyed by Id, or merge into the record already stored for that key.
275 auto Result = MemProfRecordData.insert({Id, Record});
276 // If we inserted a new record then we are done.
277 if (Result.second) {
278 return;
279 }
280 memprof::IndexedMemProfRecord &Existing = Result.first->second;
281 Existing.merge(Record);
282}
283
// addMemProfFrame (signature's first line missing from this extract):
// returns true on success; returns false and warns when an existing FrameId
// maps to different frame contents.
285 const memprof::Frame &Frame,
286 function_ref<void(Error)> Warn) {
287 auto Result = MemProfFrameData.insert({Id, Frame});
288 // If a mapping already exists for the current frame id and it does not
289 // match the new mapping provided then reset the existing contents and bail
290 // out. We don't support the merging of memprof data whose Frame -> Id
291 // mapping across profiles is inconsistent.
292 if (!Result.second && Result.first->second != Frame) {
293 Warn(make_error<InstrProfError>(instrprof_error::malformed,
294 "frame to id mapping mismatch"));
295 return false;
296 }
297 return true;
298}
299
// addBinaryIds (signature missing from this extract): append the given
// binary ids; duplicates are deduplicated later in writeImpl.
301 llvm::append_range(BinaryIds, BIs);
302}
303
304void InstrProfWriter::addTemporalProfileTrace(TemporalProfTraceTy Trace) {
305 if (Trace.FunctionNameRefs.size() > MaxTemporalProfTraceLength)
306 Trace.FunctionNameRefs.resize(MaxTemporalProfTraceLength);
307 if (Trace.FunctionNameRefs.empty())
308 return;
309
310 if (TemporalProfTraceStreamSize < TemporalProfTraceReservoirSize) {
311 // Simply append the trace if we have not yet hit our reservoir size limit.
312 TemporalProfTraces.push_back(std::move(Trace));
313 } else {
314 // Otherwise, replace a random trace in the stream.
315 std::uniform_int_distribution<uint64_t> Distribution(
316 0, TemporalProfTraceStreamSize);
317 uint64_t RandomIndex = Distribution(RNG);
318 if (RandomIndex < TemporalProfTraces.size())
319 TemporalProfTraces[RandomIndex] = std::move(Trace);
320 }
321 ++TemporalProfTraceStreamSize;
322}
323
// addTemporalProfileTraces (signature's first line missing from this
// extract): merge a source trace reservoir into this writer's reservoir
// while preserving the reservoir-sampling distribution.
325 SmallVectorImpl<TemporalProfTraceTy> &SrcTraces, uint64_t SrcStreamSize) {
326 // Assume that the source has the same reservoir size as the destination to
327 // avoid needing to record it in the indexed profile format.
328 bool IsDestSampled =
329 (TemporalProfTraceStreamSize > TemporalProfTraceReservoirSize);
330 bool IsSrcSampled = (SrcStreamSize > TemporalProfTraceReservoirSize);
331 if (!IsDestSampled && IsSrcSampled) {
332 // If one of the traces are sampled, ensure that it belongs to Dest.
333 std::swap(TemporalProfTraces, SrcTraces);
334 std::swap(TemporalProfTraceStreamSize, SrcStreamSize);
335 std::swap(IsDestSampled, IsSrcSampled);
336 }
337 if (!IsSrcSampled) {
338 // If the source stream is not sampled, we add each source trace normally.
339 for (auto &Trace : SrcTraces)
340 addTemporalProfileTrace(std::move(Trace));
341 return;
342 }
343 // Otherwise, we find the traces that would have been removed if we added
344 // the whole source stream.
345 SmallSetVector<uint64_t, 8> IndicesToReplace;
346 for (uint64_t I = 0; I < SrcStreamSize; I++) {
347 std::uniform_int_distribution<uint64_t> Distribution(
348 0, TemporalProfTraceStreamSize);
349 uint64_t RandomIndex = Distribution(RNG);
350 if (RandomIndex < TemporalProfTraces.size())
351 IndicesToReplace.insert(RandomIndex);
352 ++TemporalProfTraceStreamSize;
353 }
354 // Then we insert a random sample of the source traces.
355 llvm::shuffle(SrcTraces.begin(), SrcTraces.end(), RNG);
356 for (const auto &[Index, Trace] : llvm::zip(IndicesToReplace, SrcTraces))
357 TemporalProfTraces[Index] = std::move(Trace);
358}
359
// mergeRecordsFromWriter (signature's first line missing from this
// extract): fold all of IPW's function records, binary ids, temporal
// traces, and memprof data into this writer.
361 function_ref<void(Error)> Warn) {
362 for (auto &I : IPW.FunctionData)
363 for (auto &Func : I.getValue())
364 addRecord(I.getKey(), Func.first, std::move(Func.second), 1, Warn);
365
366 BinaryIds.reserve(BinaryIds.size() + IPW.BinaryIds.size());
367 for (auto &I : IPW.BinaryIds)
// NOTE(review): the loop-body line (appending I to BinaryIds) is missing
// from this extract.
369
370 addTemporalProfileTraces(IPW.TemporalProfTraces,
371 IPW.TemporalProfTraceStreamSize);
372
373 MemProfFrameData.reserve(IPW.MemProfFrameData.size());
374 for (auto &I : IPW.MemProfFrameData) {
375 // If we weren't able to add the frame mappings then it doesn't make sense
376 // to try to merge the records from this profile.
377 if (!addMemProfFrame(I.first, I.second, Warn))
378 return;
379 }
380
381 MemProfRecordData.reserve(IPW.MemProfRecordData.size());
382 for (auto &I : IPW.MemProfRecordData) {
383 addMemProfRecord(I.first, I.second);
384 }
385}
386
387bool InstrProfWriter::shouldEncodeData(const ProfilingData &PD) {
388 if (!Sparse)
389 return true;
390 for (const auto &Func : PD) {
391 const InstrProfRecord &IPR = Func.second;
392 if (llvm::any_of(IPR.Counts, [](uint64_t Count) { return Count > 0; }))
393 return true;
394 if (llvm::any_of(IPR.BitmapBytes, [](uint8_t Byte) { return Byte > 0; }))
395 return true;
396 }
397 return false;
398}
399
400static void setSummary(IndexedInstrProf::Summary *TheSummary,
401 ProfileSummary &PS) {
402 using namespace IndexedInstrProf;
403
404 const std::vector<ProfileSummaryEntry> &Res = PS.getDetailedSummary();
405 TheSummary->NumSummaryFields = Summary::NumKinds;
406 TheSummary->NumCutoffEntries = Res.size();
407 TheSummary->set(Summary::MaxFunctionCount, PS.getMaxFunctionCount());
408 TheSummary->set(Summary::MaxBlockCount, PS.getMaxCount());
409 TheSummary->set(Summary::MaxInternalBlockCount, PS.getMaxInternalCount());
410 TheSummary->set(Summary::TotalBlockCount, PS.getTotalCount());
411 TheSummary->set(Summary::TotalNumBlocks, PS.getNumCounts());
412 TheSummary->set(Summary::TotalNumFunctions, PS.getNumFunctions());
413 for (unsigned I = 0; I < Res.size(); I++)
414 TheSummary->setEntry(I, Res[I]);
415}
416
417// Serialize Schema.
419 const memprof::MemProfSchema &Schema) {
420 OS.write(static_cast<uint64_t>(Schema.size()));
421 for (const auto Id : Schema)
422 OS.write(static_cast<uint64_t>(Id));
423}
424
425// Serialize MemProfRecordData. Return RecordTableOffset.
// NOTE(review): the signature and the declaration lines for the record
// writer trait and the on-disk chained hash table generator are missing
// from this extract.
429 &MemProfRecordData,
430 memprof::MemProfSchema *Schema) {
432 RecordWriter.Schema = Schema;
434 RecordTableGenerator;
435 for (auto &[GUID, Record] : MemProfRecordData) {
436 // Insert the key (func hash) and value (memprof record).
437 RecordTableGenerator.insert(GUID, Record, RecordWriter);
438 }
439 // Release the memory of this MapVector as it is no longer needed.
440 MemProfRecordData.clear();
441
442 // The call to Emit invokes RecordWriterTrait::EmitData which destructs
443 // the memprof record copies owned by the RecordTableGenerator. This works
444 // because the RecordTableGenerator is not used after this point.
445 return RecordTableGenerator.Emit(OS.OS, RecordWriter);
446}
447
448// Serialize MemProfFrameData. Return FrameTableOffset.
// NOTE(review): the signature and the frame table generator declaration
// lines are missing from this extract.
453 FrameTableGenerator;
454 for (auto &[FrameId, Frame] : MemProfFrameData) {
455 // Insert the key (frame id) and value (frame contents).
456 FrameTableGenerator.insert(FrameId, Frame);
457 }
458 // Release the memory of this MapVector as it is no longer needed.
459 MemProfFrameData.clear();
460
461 return FrameTableGenerator.Emit(OS.OS);
462}
463
// writeMemProfV0 (signature start and the Schema declaration line missing
// from this extract): emit the version-0 memprof section — three reserved
// header words, the schema, the record table, then the frame table — and
// back-patch the header with the real offsets.
467 &MemProfRecordData,
469 uint64_t HeaderUpdatePos = OS.tell();
470 OS.write(0ULL); // Reserve space for the memprof record table offset.
471 OS.write(0ULL); // Reserve space for the memprof frame payload offset.
472 OS.write(0ULL); // Reserve space for the memprof frame table offset.
473
475 writeMemProfSchema(OS, Schema);
476
477 uint64_t RecordTableOffset =
478 writeMemProfRecords(OS, MemProfRecordData, &Schema);
479
480 uint64_t FramePayloadOffset = OS.tell();
481 uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfFrameData);
482
483 uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
484 OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
485
486 return Error::success();
487}
488
// writeMemProfV1 (signature start and the Schema declaration line missing
// from this extract): emits the same on-disk layout as writeMemProfV0 at
// this level; kept as a separate function per memprof indexed version.
492 &MemProfRecordData,
495 uint64_t HeaderUpdatePos = OS.tell();
496 OS.write(0ULL); // Reserve space for the memprof record table offset.
497 OS.write(0ULL); // Reserve space for the memprof frame payload offset.
498 OS.write(0ULL); // Reserve space for the memprof frame table offset.
499
501 writeMemProfSchema(OS, Schema);
502
503 uint64_t RecordTableOffset =
504 writeMemProfRecords(OS, MemProfRecordData, &Schema);
505
506 uint64_t FramePayloadOffset = OS.tell();
507 uint64_t FrameTableOffset = writeMemProfFrames(OS, MemProfFrameData);
508
509 uint64_t Header[] = {RecordTableOffset, FramePayloadOffset, FrameTableOffset};
510 OS.patch({{HeaderUpdatePos, Header, std::size(Header)}});
511
512 return Error::success();
513}
514
515// The MemProf profile data includes a simple schema
516// with the format described below followed by the hashtable:
517// uint64_t Version
518// uint64_t RecordTableOffset = RecordTableGenerator.Emit
519// uint64_t FramePayloadOffset = Stream offset before emitting the frame table
520// uint64_t FrameTableOffset = FrameTableGenerator.Emit
521// uint64_t Num schema entries
522// uint64_t Schema entry 0
523// uint64_t Schema entry 1
524// ....
525// uint64_t Schema entry N - 1
526// OnDiskChainedHashTable MemProfRecordData
527// OnDiskChainedHashTable MemProfFrameData
// writeMemProf (signature start missing from this extract): dispatch on the
// requested memprof indexed version; unhandled versions fall through to the
// unsupported-version error below.
531 &MemProfRecordData,
533 memprof::IndexedVersion MemProfVersionRequested) {
534
535 switch (MemProfVersionRequested) {
// NOTE(review): the 'case' label lines are missing from this extract.
537 return writeMemProfV0(OS, MemProfRecordData, MemProfFrameData);
539 return writeMemProfV1(OS, MemProfRecordData, MemProfFrameData);
541 // TODO: Implement. Fall through to the error handling below for now.
542 break;
543 }
544
545 return make_error<InstrProfError>(
547 formatv("MemProf version {} not supported; "
548 "requires version between {} and {}, inclusive",
549 MemProfVersionRequested, memprof::MinimumSupportedVersion,
551}
552
// Serialize the whole indexed profile to OS: header, hash table, optional
// memprof section, binary ids, vtable names, temporal traces, and the
// (back-patched) summaries.
553Error InstrProfWriter::writeImpl(ProfOStream &OS) {
554 using namespace IndexedInstrProf;
555 using namespace support;
556
// NOTE(review): several declaration lines (hash table generator, summary
// builders, OrderedData, Header, NumEntries) are missing from this extract;
// the numbered gaps below mark them.
558
560 InfoObj->SummaryBuilder = &ISB;
562 InfoObj->CSSummaryBuilder = &CSISB;
563
564 // Populate the hash table generator.
566 for (const auto &I : FunctionData)
567 if (shouldEncodeData(I.getValue()))
568 OrderedData.emplace_back((I.getKey()), &I.getValue());
569 llvm::sort(OrderedData, less_first());
570 for (const auto &I : OrderedData)
571 Generator.insert(I.first, I.second);
572
573 // Write the header.
575 Header.Magic = IndexedInstrProf::Magic;
576 Header.Version = WritePrevVersion
579 // The WritePrevVersion handling will either need to be removed or updated
580 // if the version is advanced beyond 12.
// Fold the profile-kind flags into the version word.
583 if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
584 Header.Version |= VARIANT_MASK_IR_PROF;
585 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
586 Header.Version |= VARIANT_MASK_CSIR_PROF;
587 if (static_cast<bool>(ProfileKind &
589 Header.Version |= VARIANT_MASK_INSTR_ENTRY;
590 if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
591 Header.Version |= VARIANT_MASK_BYTE_COVERAGE;
592 if (static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly))
593 Header.Version |= VARIANT_MASK_FUNCTION_ENTRY_ONLY;
594 if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf))
595 Header.Version |= VARIANT_MASK_MEMPROF;
596 if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
597 Header.Version |= VARIANT_MASK_TEMPORAL_PROF;
598
599 Header.Unused = 0;
600 Header.HashType = static_cast<uint64_t>(IndexedInstrProf::HashType);
601 Header.HashOffset = 0;
602 Header.MemProfOffset = 0;
603 Header.BinaryIdOffset = 0;
604 Header.TemporalProfTracesOffset = 0;
605 Header.VTableNamesOffset = 0;
606
607 // Only write out the first four fields. We need to remember the offset of the
608 // remaining fields to allow back patching later.
609 for (int I = 0; I < 4; I++)
610 OS.write(reinterpret_cast<uint64_t *>(&Header)[I]);
611
612 // Save the location of Header.HashOffset field in \c OS.
613 uint64_t HashTableStartFieldOffset = OS.tell();
614 // Reserve the space for HashOffset field.
615 OS.write(0);
616
617 // Save the location of MemProf profile data. This is stored in two parts as
618 // the schema and as a separate on-disk chained hashtable.
619 uint64_t MemProfSectionOffset = OS.tell();
620 // Reserve space for the MemProf table field to be patched later if this
621 // profile contains memory profile information.
622 OS.write(0);
623
624 // Save the location of binary ids section.
625 uint64_t BinaryIdSectionOffset = OS.tell();
626 // Reserve space for the BinaryIdOffset field to be patched later if this
627 // profile contains binary ids.
628 OS.write(0);
629
630 uint64_t TemporalProfTracesOffset = OS.tell();
631 OS.write(0);
632
// The VTableNames offset slot exists only in the current (non-previous)
// version of the format.
633 uint64_t VTableNamesOffset = OS.tell();
634 if (!WritePrevVersion)
635 OS.write(0);
636
637 // Reserve space to write profile summary data.
639 uint32_t SummarySize = Summary::getSize(Summary::NumKinds, NumEntries);
640 // Remember the summary offset.
641 uint64_t SummaryOffset = OS.tell();
642 for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++)
643 OS.write(0);
644 uint64_t CSSummaryOffset = 0;
645 uint64_t CSSummarySize = 0;
646 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
647 CSSummaryOffset = OS.tell();
648 CSSummarySize = SummarySize / sizeof(uint64_t);
649 for (unsigned I = 0; I < CSSummarySize; I++)
650 OS.write(0);
651 }
652
653 // Write the hash table.
654 uint64_t HashTableStart = Generator.Emit(OS.OS, *InfoObj);
655
656 // Write the MemProf profile data if we have it.
657 uint64_t MemProfSectionStart = 0;
658 if (static_cast<bool>(ProfileKind & InstrProfKind::MemProf)) {
659 MemProfSectionStart = OS.tell();
660 if (auto E = writeMemProf(OS, MemProfRecordData, MemProfFrameData,
661 MemProfVersionRequested))
662 return E;
663 }
664
665 // BinaryIdSection has two parts:
666 // 1. uint64_t BinaryIdsSectionSize
667 // 2. list of binary ids that consist of:
668 // a. uint64_t BinaryIdLength
669 // b. uint8_t BinaryIdData
670 // c. uint8_t Padding (if necessary)
671 uint64_t BinaryIdSectionStart = OS.tell();
672 // Calculate size of binary section.
673 uint64_t BinaryIdsSectionSize = 0;
674
675 // Remove duplicate binary ids.
676 llvm::sort(BinaryIds);
677 BinaryIds.erase(std::unique(BinaryIds.begin(), BinaryIds.end()),
678 BinaryIds.end());
679
680 for (auto BI : BinaryIds) {
681 // Increment by binary id length data type size.
682 BinaryIdsSectionSize += sizeof(uint64_t);
683 // Increment by binary id data length, aligned to 8 bytes.
684 BinaryIdsSectionSize += alignToPowerOf2(BI.size(), sizeof(uint64_t));
685 }
686 // Write binary ids section size.
687 OS.write(BinaryIdsSectionSize);
688
689 for (auto BI : BinaryIds) {
690 uint64_t BILen = BI.size();
691 // Write binary id length.
692 OS.write(BILen);
693 // Write binary id data.
694 for (unsigned K = 0; K < BILen; K++)
695 OS.writeByte(BI[K]);
696 // Write padding if necessary.
697 uint64_t PaddingSize = alignToPowerOf2(BILen, sizeof(uint64_t)) - BILen;
698 for (unsigned K = 0; K < PaddingSize; K++)
699 OS.writeByte(0);
700 }
701
702 uint64_t VTableNamesSectionStart = OS.tell();
703
704 if (!WritePrevVersion) {
// Collect, compress (when zlib is available), and emit the vtable name
// strings, length-prefixed and padded to an 8-byte boundary.
705 std::vector<std::string> VTableNameStrs;
706 for (StringRef VTableName : VTableNames.keys())
707 VTableNameStrs.push_back(VTableName.str());
708
709 std::string CompressedVTableNames;
710 if (!VTableNameStrs.empty())
712 VTableNameStrs, compression::zlib::isAvailable(),
713 CompressedVTableNames))
714 return E;
715
716 const uint64_t CompressedStringLen = CompressedVTableNames.length();
717
718 // Record the length of compressed string.
719 OS.write(CompressedStringLen);
720
721 // Write the chars in compressed strings.
722 for (auto &c : CompressedVTableNames)
723 OS.writeByte(static_cast<uint8_t>(c));
724
725 // Pad up to a multiple of 8.
726 // InstrProfReader could read bytes according to 'CompressedStringLen'.
727 const uint64_t PaddedLength = alignTo(CompressedStringLen, 8);
728
729 for (uint64_t K = CompressedStringLen; K < PaddedLength; K++)
730 OS.writeByte(0);
731 }
732
733 uint64_t TemporalProfTracesSectionStart = 0;
734 if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile)) {
735 TemporalProfTracesSectionStart = OS.tell();
736 OS.write(TemporalProfTraces.size());
737 OS.write(TemporalProfTraceStreamSize);
738 for (auto &Trace : TemporalProfTraces) {
739 OS.write(Trace.Weight);
740 OS.write(Trace.FunctionNameRefs.size());
741 for (auto &NameRef : Trace.FunctionNameRefs)
742 OS.write(NameRef);
743 }
744 }
745
746 // Allocate space for data to be serialized out.
747 std::unique_ptr<IndexedInstrProf::Summary> TheSummary =
749 // Compute the Summary and copy the data to the data
750 // structure to be serialized out (to disk or buffer).
751 std::unique_ptr<ProfileSummary> PS = ISB.getSummary();
752 setSummary(TheSummary.get(), *PS);
753 InfoObj->SummaryBuilder = nullptr;
754
755 // For Context Sensitive summary.
756 std::unique_ptr<IndexedInstrProf::Summary> TheCSSummary = nullptr;
757 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive)) {
758 TheCSSummary = IndexedInstrProf::allocSummary(SummarySize);
759 std::unique_ptr<ProfileSummary> CSPS = CSISB.getSummary();
760 setSummary(TheCSSummary.get(), *CSPS);
761 }
762 InfoObj->CSSummaryBuilder = nullptr;
763
// Back-patch the header offsets and summary blobs now that every section's
// start position is known. The previous-version branch differs only in not
// patching the VTableNames offset.
764 if (!WritePrevVersion) {
765 // Now do the final patch:
766 PatchItem PatchItems[] = {
767 // Patch the Header.HashOffset field.
768 {HashTableStartFieldOffset, &HashTableStart, 1},
769 // Patch the Header.MemProfOffset (=0 for profiles without MemProf
770 // data).
771 {MemProfSectionOffset, &MemProfSectionStart, 1},
772 // Patch the Header.BinaryIdSectionOffset.
773 {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
774 // Patch the Header.TemporalProfTracesOffset (=0 for profiles without
775 // traces).
776 {TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
777 {VTableNamesOffset, &VTableNamesSectionStart, 1},
778 // Patch the summary data.
779 {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
780 (int)(SummarySize / sizeof(uint64_t))},
781 {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
782 (int)CSSummarySize}};
783
784 OS.patch(PatchItems);
785 } else {
786 // Now do the final patch:
787 PatchItem PatchItems[] = {
788 // Patch the Header.HashOffset field.
789 {HashTableStartFieldOffset, &HashTableStart, 1},
790 // Patch the Header.MemProfOffset (=0 for profiles without MemProf
791 // data).
792 {MemProfSectionOffset, &MemProfSectionStart, 1},
793 // Patch the Header.BinaryIdSectionOffset.
794 {BinaryIdSectionOffset, &BinaryIdSectionStart, 1},
795 // Patch the Header.TemporalProfTracesOffset (=0 for profiles without
796 // traces).
797 {TemporalProfTracesOffset, &TemporalProfTracesSectionStart, 1},
798 // Patch the summary data.
799 {SummaryOffset, reinterpret_cast<uint64_t *>(TheSummary.get()),
800 (int)(SummarySize / sizeof(uint64_t))},
801 {CSSummaryOffset, reinterpret_cast<uint64_t *>(TheCSSummary.get()),
802 (int)CSSummarySize}};
803
804 OS.patch(PatchItems);
805 }
806
// Final validation pass over every record before declaring success.
807 for (const auto &I : FunctionData)
808 for (const auto &F : I.getValue())
809 if (Error E = validateRecord(F.second))
810 return E;
811
812 return Error::success();
813}
814
// write(raw_fd_ostream) — signature line missing from this extract; wraps
// the stream in a ProfOStream and delegates to writeImpl.
816 // Write the hash table.
817 ProfOStream POS(OS);
818 return writeImpl(POS);
819}
820
// write(raw_string_ostream) overload — signature line missing; same
// delegation to writeImpl via ProfOStream.
822 ProfOStream POS(OS);
823 return writeImpl(POS);
824}
825
// Serialize the profile into an in-memory buffer; returns nullptr if the
// underlying write fails.
826std::unique_ptr<MemoryBuffer> InstrProfWriter::writeBuffer() {
827 std::string Data;
// NOTE(review): the raw_string_ostream declaration line is missing from
// this extract.
829 // Write the hash table.
830 if (Error E = write(OS))
831 return nullptr;
832 // Return this in an aligned memory buffer.
// NOTE(review): the return statement line is missing from this extract.
834}
835
// Table of value-profile kind names, expanded from the VALUE_PROF_KIND
// entries in InstrProfData.inc (the #include line is missing from this
// extract).
836static const char *ValueProfKindStr[] = {
837#define VALUE_PROF_KIND(Enumerator, Value, Descr) #Enumerator,
839};
840
// validateRecord (signature line missing from this extract): for every
// value-profile kind other than indirect-call/vtable targets, duplicate
// values at a single site make the profile invalid.
842 for (uint32_t VK = 0; VK <= IPVK_Last; VK++) {
843 uint32_t NS = Func.getNumValueSites(VK);
844 if (!NS)
845 continue;
846 for (uint32_t S = 0; S < NS; S++) {
847 uint32_t ND = Func.getNumValueDataForSite(VK, S);
848 std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
849 DenseSet<uint64_t> SeenValues;
850 for (uint32_t I = 0; I < ND; I++)
851 if ((VK != IPVK_IndirectCallTarget && VK != IPVK_VTableTarget) &&
852 !SeenValues.insert(VD[I].Value).second)
853 return make_error<InstrProfError>(instrprof_error::invalid_prof);
854 }
855 }
856
857 return Error::success();
858}
859
// writeRecordInText (signature's first line missing from this extract):
// emit one function record in the text profile format — name, hash,
// counters, optional bitmap bytes, then value-profile data resolved through
// the symbol table.
861 const InstrProfRecord &Func,
862 InstrProfSymtab &Symtab,
864 OS << Name << "\n";
865 OS << "# Func Hash:\n" << Hash << "\n";
866 OS << "# Num Counters:\n" << Func.Counts.size() << "\n";
867 OS << "# Counter Values:\n";
868 for (uint64_t Count : Func.Counts)
869 OS << Count << "\n";
870
871 if (Func.BitmapBytes.size() > 0) {
// The '$' sigil printed before the count introduces the bitmap-byte block
// in the text format.
872 OS << "# Num Bitmap Bytes:\n$" << Func.BitmapBytes.size() << "\n";
873 OS << "# Bitmap Byte Values:\n";
874 for (uint8_t Byte : Func.BitmapBytes) {
875 OS << "0x";
876 OS.write_hex(Byte);
877 OS << "\n";
878 }
879 OS << "\n";
880 }
881
882 uint32_t NumValueKinds = Func.getNumValueKinds();
883 if (!NumValueKinds) {
884 OS << "\n";
885 return;
886 }
887
888 OS << "# Num Value Kinds:\n" << Func.getNumValueKinds() << "\n";
889 for (uint32_t VK = 0; VK < IPVK_Last + 1; VK++) {
890 uint32_t NS = Func.getNumValueSites(VK);
891 if (!NS)
892 continue;
893 OS << "# ValueKind = " << ValueProfKindStr[VK] << ":\n" << VK << "\n";
894 OS << "# NumValueSites:\n" << NS << "\n";
895 for (uint32_t S = 0; S < NS; S++) {
896 uint32_t ND = Func.getNumValueDataForSite(VK, S);
897 OS << ND << "\n";
898 std::unique_ptr<InstrProfValueData[]> VD = Func.getValueForSite(VK, S);
899 for (uint32_t I = 0; I < ND; I++) {
// Indirect-call and vtable targets are printed by symbol name when the
// symbol table can resolve them.
900 if (VK == IPVK_IndirectCallTarget || VK == IPVK_VTableTarget)
901 OS << Symtab.getFuncOrVarNameIfDefined(VD[I].Value) << ":"
902 << VD[I].Count << "\n";
903 else
904 OS << VD[I].Value << ":" << VD[I].Count << "\n";
905 }
906 }
907 }
908
909 OS << "\n";
910}
911
// writeText (signature line missing from this extract): emit the whole
// profile in text form — header flags, optional temporal traces, then the
// records sorted by (name, hash) — and validate every record afterwards.
913 // Check CS first since it implies an IR level profile.
914 if (static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive))
915 OS << "# CSIR level Instrumentation Flag\n:csir\n";
916 else if (static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation))
917 OS << "# IR level Instrumentation Flag\n:ir\n";
918
919 if (static_cast<bool>(ProfileKind &
921 OS << "# Always instrument the function entry block\n:entry_first\n";
922 if (static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage))
923 OS << "# Instrument block coverage\n:single_byte_coverage\n";
924 InstrProfSymtab Symtab;
925
// NOTE(review): the 'using FuncPair = ...' line is missing from this
// extract.
927 using RecordType = std::pair<StringRef, FuncPair>;
928 SmallVector<RecordType, 4> OrderedFuncData;
929
930 for (const auto &I : FunctionData) {
931 if (shouldEncodeData(I.getValue())) {
932 if (Error E = Symtab.addFuncName(I.getKey()))
933 return E;
934 for (const auto &Func : I.getValue())
935 OrderedFuncData.push_back(std::make_pair(I.getKey(), Func));
936 }
937 }
938
939 for (const auto &VTableName : VTableNames)
940 if (Error E = Symtab.addVTableName(VTableName.getKey()))
941 return E;
942
943 if (static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile))
// NOTE(review): the call emitting the temporal trace data is missing from
// this extract.
945
// Sort by (function name, hash) for a deterministic text output order.
946 llvm::sort(OrderedFuncData, [](const RecordType &A, const RecordType &B) {
947 return std::tie(A.first, A.second.first) <
948 std::tie(B.first, B.second.first);
949 });
950
951 for (const auto &record : OrderedFuncData) {
952 const StringRef &Name = record.first;
953 const FuncPair &Func = record.second;
954 writeRecordInText(Name, Func.first, Func.second, Symtab, OS);
955 }
956
957 for (const auto &record : OrderedFuncData) {
958 const FuncPair &Func = record.second;
959 if (Error E = validateRecord(Func.second))
960 return E;
961 }
962
963 return Error::success();
964}
965
// writeTextTemporalProfTraceData (signature's first line missing from this
// extract): emit the ':temporal_prof_traces' text section — trace count,
// stream size, and for each trace its weight plus a comma-separated list of
// function names resolved through the symbol table.
967 InstrProfSymtab &Symtab) {
968 OS << ":temporal_prof_traces\n";
969 OS << "# Num Temporal Profile Traces:\n" << TemporalProfTraces.size() << "\n";
970 OS << "# Temporal Profile Trace Stream Size:\n"
971 << TemporalProfTraceStreamSize << "\n";
972 for (auto &Trace : TemporalProfTraces) {
973 OS << "# Weight:\n" << Trace.Weight << "\n";
974 for (auto &NameRef : Trace.FunctionNameRefs)
975 OS << Symtab.getFuncOrVarName(NameRef) << ",";
976 OS << "\n";
977 }
978 OS << "\n";
979}
basic Basic Alias true
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
std::string Name
static uint64_t writeMemProfRecords(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, memprof::MemProfSchema *Schema)
static Error writeMemProfV1(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData)
static Error writeMemProfV0(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData)
static uint64_t writeMemProfFrames(ProfOStream &OS, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData)
static Error writeMemProf(ProfOStream &OS, llvm::MapVector< GlobalValue::GUID, memprof::IndexedMemProfRecord > &MemProfRecordData, llvm::MapVector< memprof::FrameId, memprof::Frame > &MemProfFrameData, memprof::IndexedVersion MemProfVersionRequested)
static void setSummary(IndexedInstrProf::Summary *TheSummary, ProfileSummary &PS)
static const char * ValueProfKindStr[]
static void writeMemProfSchema(ProfOStream &OS, const memprof::MemProfSchema &Schema)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Defines facilities for reading and writing on-disk hash tables.
#define P(N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file implements a set that has insertion order iteration characteristics.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition: DenseMap.h:71
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
static ErrorSuccess success()
Create a success value.
Definition: Error.h:334
static std::pair< offset_type, offset_type > EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V)
const InstrProfWriter::ProfilingData *const data_type_ref
InstrProfSummaryBuilder * SummaryBuilder
static hash_value_type ComputeHash(key_type_ref K)
InstrProfSummaryBuilder * CSSummaryBuilder
void EmitKey(raw_ostream &Out, key_type_ref K, offset_type N)
void EmitData(raw_ostream &Out, key_type_ref, data_type_ref V, offset_type)
const InstrProfWriter::ProfilingData *const data_type
void addRecord(const InstrProfRecord &)
A symbol table used for function [IR]PGO name look-up with keys (such as pointers,...
Definition: InstrProf.h:451
StringRef getFuncOrVarName(uint64_t ValMD5Hash)
Return name of functions or global variables from the name's md5 hash value.
Definition: InstrProf.h:692
Error addVTableName(StringRef VTableName)
Adds VTableName as a known symbol, and inserts it to a map that tracks all vtable names.
Definition: InstrProf.h:580
Error addFuncName(StringRef FuncName)
The method name is kept since there are many callers.
Definition: InstrProf.h:576
StringRef getFuncOrVarNameIfDefined(uint64_t ValMD5Hash)
Just like getFuncOrVarName, except that it will return literal string 'External Symbol' if the functi...
Definition: InstrProf.h:685
Error write(raw_fd_ostream &OS)
Write the profile to OS.
void addTemporalProfileTraces(SmallVectorImpl< TemporalProfTraceTy > &SrcTraces, uint64_t SrcStreamSize)
Add SrcTraces using reservoir sampling where SrcStreamSize is the total number of temporal profiling ...
void overlapRecord(NamedInstrProfRecord &&Other, OverlapStats &Overlap, OverlapStats &FuncLevelOverlap, const OverlapFuncFilters &FuncFilter)
Error writeText(raw_fd_ostream &OS)
Write the profile in text format to OS.
InstrProfWriter(bool Sparse=false, uint64_t TemporalProfTraceReservoirSize=0, uint64_t MaxTemporalProfTraceLength=0, bool WritePrevVersion=false, memprof::IndexedVersion MemProfVersionRequested=memprof::Version0)
void addBinaryIds(ArrayRef< llvm::object::BuildID > BIs)
void addMemProfRecord(const GlobalValue::GUID Id, const memprof::IndexedMemProfRecord &Record)
Add a memprof record for a function identified by its Id.
static void writeRecordInText(StringRef Name, uint64_t Hash, const InstrProfRecord &Counters, InstrProfSymtab &Symtab, raw_fd_ostream &OS)
Write Record in text format to OS.
void setValueProfDataEndianness(llvm::endianness Endianness)
void addRecord(NamedInstrProfRecord &&I, uint64_t Weight, function_ref< void(Error)> Warn)
Add function counts for the given function.
void mergeRecordsFromWriter(InstrProfWriter &&IPW, function_ref< void(Error)> Warn)
Merge existing function counts from the given writer.
void writeTextTemporalProfTraceData(raw_fd_ostream &OS, InstrProfSymtab &Symtab)
Write temporal profile trace data to the header in text format to OS.
std::unique_ptr< MemoryBuffer > writeBuffer()
Write the profile, returning the raw data. For testing.
bool addMemProfFrame(const memprof::FrameId, const memprof::Frame &F, function_ref< void(Error)> Warn)
Add a memprof frame identified by the hash of the contents of the frame in FrameId.
void setOutputSparse(bool Sparse)
Error validateRecord(const InstrProfRecord &Func)
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
void clear()
Definition: MapVector.h:88
static std::unique_ptr< MemoryBuffer > getMemBufferCopy(StringRef InputData, const Twine &BufferName="")
Open the specified memory range as a MemoryBuffer, copying the contents and taking ownership of it.
Generates an on disk hash table.
offset_type Emit(raw_ostream &Out)
Emit the table to Out, which must not be at offset 0.
void writeByte(uint8_t V)
void patch(ArrayRef< PatchItem > P)
ProfOStream(raw_string_ostream &STR)
support::endian::Writer LE
ProfOStream(raw_fd_ostream &FD)
void write(uint64_t V)
static const ArrayRef< uint32_t > DefaultCutoffs
A vector of useful cutoff values for detailed summary.
Definition: ProfileCommon.h:70
uint64_t getTotalCount() const
uint64_t getMaxCount() const
const SummaryEntryVector & getDetailedSummary()
uint32_t getNumCounts() const
uint64_t getMaxInternalCount() const
uint64_t getMaxFunctionCount() const
uint32_t getNumFunctions() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
iterator_range< StringMapKeyIterator< ValueTy > > keys() const
Definition: StringMap.h:228
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool empty() const
Definition: Trace.h:96
unsigned size() const
Definition: Trace.h:95
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
An efficient, type-erasing, non-owning reference to a callable.
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:470
uint64_t seek(uint64_t off)
Flushes the stream and repositions the underlying file descriptor position to the offset specified fr...
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
uint64_t tell() const
tell - Return the current offset with the file.
Definition: raw_ostream.h:150
raw_ostream & write_hex(unsigned long long N)
Output N in hexadecimal, without any prefix or padding.
raw_ostream & write(unsigned char C)
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:660
std::string & str()
Returns the string's reference.
Definition: raw_ostream.h:678
std::unique_ptr< Summary > allocSummary(uint32_t TotalSize)
Definition: InstrProf.h:1275
uint64_t ComputeHash(StringRef K)
Definition: InstrProf.h:1157
const uint64_t Magic
Definition: InstrProf.h:1114
const HashT HashType
Definition: InstrProf.h:1155
constexpr uint64_t MaximumSupportedVersion
Definition: MemProf.h:30
constexpr uint64_t MinimumSupportedVersion
Definition: MemProf.h:29
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:853
uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align)
Definition: MathExtras.h:382
auto formatv(const char *Fmt, Ts &&...Vals) -> formatv_object< decltype(std::make_tuple(support::detail::build_format_adapter(std::forward< Ts >(Vals))...))>
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2073
void shuffle(Iterator first, Iterator last, RNG &&g)
Definition: STLExtras.h:1541
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
@ Other
Any other memory.
instrprof_error
Definition: InstrProf.h:346
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
Error collectGlobalObjectNameStrings(ArrayRef< std::string > NameStrs, bool doCompression, std::string &Result)
Given a vector of strings (names of global objects like functions or, virtual tables) NameStrs,...
Definition: InstrProf.cpp:626
endianness
Definition: bit.h:70
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
uint64_t Pos
uint64_t * D
Helper object to track which of three possible relocation mechanisms are used for a particular value ...
void set(SummaryFieldKind K, uint64_t V)
Definition: InstrProf.h:1261
void setEntry(uint32_t I, const ProfileSummaryEntry &E)
Definition: InstrProf.h:1267
Profiling information for a single function.
Definition: InstrProf.h:808
std::vector< uint64_t > Counts
Definition: InstrProf.h:809
void merge(InstrProfRecord &Other, uint64_t Weight, function_ref< void(instrprof_error)> Warn)
Merge the counts in Other into this one.
Definition: InstrProf.cpp:864
void overlap(InstrProfRecord &Other, OverlapStats &Overlap, OverlapStats &FuncLevelOverlap, uint64_t ValueCutoff)
Compute the overlap b/w this IntrprofRecord and Other.
Definition: InstrProf.cpp:760
void sortValueData()
Sort value profile data (per site) by count.
Definition: InstrProf.h:883
std::vector< uint8_t > BitmapBytes
Definition: InstrProf.h:810
void scale(uint64_t N, uint64_t D, function_ref< void(instrprof_error)> Warn)
Scale up profile counts (including value profile data) by a factor of (N / D).
Definition: InstrProf.cpp:927
static bool hasCSFlagInHash(uint64_t FuncHash)
Definition: InstrProf.h:1016
const std::string NameFilter
Definition: InstrProf.h:773
void addOneMismatch(const CountSumOrPercent &MismatchFunc)
Definition: InstrProf.cpp:1492
CountSumOrPercent Overlap
Definition: InstrProf.h:735
void addOneUnique(const CountSumOrPercent &UniqueFunc)
Definition: InstrProf.cpp:1502
CountSumOrPercent Test
Definition: InstrProf.h:733
An ordered list of functions identified by their NameRef found in INSTR_PROF_DATA.
Definition: InstrProf.h:377
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450
void merge(const IndexedMemProfRecord &Other)
Definition: MemProf.h:362
static MemProfSchema getSchema()
Definition: MemProf.h:118
Adapter to write values to a stream in a particular byte order.
Definition: EndianStream.h:67
void write(ArrayRef< value_type > Val)
Definition: EndianStream.h:71