LLVM 18.0.0git
CodeLayout.cpp
Go to the documentation of this file.
1//===- CodeLayout.cpp - Implementation of code layout algorithms ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The file implements "cache-aware" layout algorithms of basic blocks and
10// functions in a binary.
11//
12// The algorithm tries to find a layout of nodes (basic blocks) of a given CFG
13// optimizing jump locality and thus processor I-cache utilization. This is
14// achieved via increasing the number of fall-through jumps and co-locating
15// frequently executed nodes together. The name follows the underlying
16// optimization problem, Extended-TSP, which is a generalization of classical
// (maximum) Traveling Salesman Problem.
18//
19// The algorithm is a greedy heuristic that works with chains (ordered lists)
20// of basic blocks. Initially all chains are isolated basic blocks. On every
21// iteration, we pick a pair of chains whose merging yields the biggest increase
22// in the ExtTSP score, which models how i-cache "friendly" a specific chain is.
23// A pair of chains giving the maximum gain is merged into a new chain. The
24// procedure stops when there is only one chain left, or when merging does not
25// increase ExtTSP. In the latter case, the remaining chains are sorted by
26// density in the decreasing order.
27//
28// An important aspect is the way two chains are merged. Unlike earlier
29// algorithms (e.g., based on the approach of Pettis-Hansen), two
30// chains, X and Y, are first split into three, X1, X2, and Y. Then we
31// consider all possible ways of gluing the three chains (e.g., X1YX2, X1X2Y,
32// X2X1Y, X2YX1, YX1X2, YX2X1) and choose the one producing the largest score.
33// This improves the quality of the final result (the search space is larger)
34// while keeping the implementation sufficiently fast.
35//
36// Reference:
37// * A. Newell and S. Pupyrev, Improved Basic Block Reordering,
38// IEEE Transactions on Computers, 2020
39// https://arxiv.org/abs/1809.04676
40//
41//===----------------------------------------------------------------------===//
42
45#include "llvm/Support/Debug.h"
46
47#include <cmath>
48#include <set>
49
50using namespace llvm;
51using namespace llvm::codelayout;
52
53#define DEBUG_TYPE "code-layout"
54
55namespace llvm {
57 "enable-ext-tsp-block-placement", cl::Hidden, cl::init(false),
58 cl::desc("Enable machine block placement based on the ext-tsp model, "
59 "optimizing I-cache utilization."));
60
62 "ext-tsp-apply-without-profile",
63 cl::desc("Whether to apply ext-tsp placement for instances w/o profile"),
64 cl::init(true), cl::Hidden);
65} // namespace llvm
66
67// Algorithm-specific params for Ext-TSP. The values are tuned for the best
68// performance of large-scale front-end bound binaries.
70 "ext-tsp-forward-weight-cond", cl::ReallyHidden, cl::init(0.1),
71 cl::desc("The weight of conditional forward jumps for ExtTSP value"));
72
74 "ext-tsp-forward-weight-uncond", cl::ReallyHidden, cl::init(0.1),
75 cl::desc("The weight of unconditional forward jumps for ExtTSP value"));
76
78 "ext-tsp-backward-weight-cond", cl::ReallyHidden, cl::init(0.1),
79 cl::desc("The weight of conditional backward jumps for ExtTSP value"));
80
82 "ext-tsp-backward-weight-uncond", cl::ReallyHidden, cl::init(0.1),
83 cl::desc("The weight of unconditional backward jumps for ExtTSP value"));
84
86 "ext-tsp-fallthrough-weight-cond", cl::ReallyHidden, cl::init(1.0),
87 cl::desc("The weight of conditional fallthrough jumps for ExtTSP value"));
88
90 "ext-tsp-fallthrough-weight-uncond", cl::ReallyHidden, cl::init(1.05),
91 cl::desc("The weight of unconditional fallthrough jumps for ExtTSP value"));
92
94 "ext-tsp-forward-distance", cl::ReallyHidden, cl::init(1024),
95 cl::desc("The maximum distance (in bytes) of a forward jump for ExtTSP"));
96
98 "ext-tsp-backward-distance", cl::ReallyHidden, cl::init(640),
99 cl::desc("The maximum distance (in bytes) of a backward jump for ExtTSP"));
100
101// The maximum size of a chain created by the algorithm. The size is bounded
102// so that the algorithm can efficiently process extremely large instance.
104 MaxChainSize("ext-tsp-max-chain-size", cl::ReallyHidden, cl::init(4096),
105 cl::desc("The maximum size of a chain to create."));
106
107// The maximum size of a chain for splitting. Larger values of the threshold
108// may yield better quality at the cost of worsen run-time.
110 "ext-tsp-chain-split-threshold", cl::ReallyHidden, cl::init(128),
111 cl::desc("The maximum size of a chain to apply splitting"));
112
113// The option enables splitting (large) chains along in-coming and out-going
114// jumps. This typically results in a better quality.
116 "ext-tsp-enable-chain-split-along-jumps", cl::ReallyHidden, cl::init(true),
117 cl::desc("The maximum size of a chain to apply splitting"));
118
119// Algorithm-specific options for CDS.
121 cl::desc("The size of the cache"));
122
124 cl::desc("The size of a line in the cache"));
125
127 "cds-distance-power", cl::ReallyHidden,
128 cl::desc("The power exponent for the distance-based locality"));
129
131 "cds-frequency-scale", cl::ReallyHidden,
132 cl::desc("The scale factor for the frequency-based locality"));
133
134namespace {
135
// Epsilon for comparison of doubles: score differences below EPS are treated
// as ties.
constexpr double EPS = 1e-8;
138
// Compute the Ext-TSP score for a single jump, given its length, the maximum
// allowed length, its execution count, and its weight.
double jumpExtTSPScore(uint64_t JumpDist, uint64_t JumpMaxDist, uint64_t Count,
                       double Weight) {
  // Jumps longer than the maximum distance contribute nothing.
  if (JumpDist > JumpMaxDist)
    return 0;
  // The contribution decays linearly with the jump length.
  const double Prob = 1.0 - static_cast<double>(JumpDist) / JumpMaxDist;
  return Weight * Prob * Count;
}
147
148// Compute the Ext-TSP score for a jump between a given pair of blocks,
149// using their sizes, (estimated) addresses and the jump execution count.
150double extTSPScore(uint64_t SrcAddr, uint64_t SrcSize, uint64_t DstAddr,
151 uint64_t Count, bool IsConditional) {
152 // Fallthrough
153 if (SrcAddr + SrcSize == DstAddr) {
154 return jumpExtTSPScore(0, 1, Count,
155 IsConditional ? FallthroughWeightCond
157 }
158 // Forward
159 if (SrcAddr + SrcSize < DstAddr) {
160 const uint64_t Dist = DstAddr - (SrcAddr + SrcSize);
161 return jumpExtTSPScore(Dist, ForwardDistance, Count,
162 IsConditional ? ForwardWeightCond
164 }
165 // Backward
166 const uint64_t Dist = SrcAddr + SrcSize - DstAddr;
167 return jumpExtTSPScore(Dist, BackwardDistance, Count,
168 IsConditional ? BackwardWeightCond
170}
171
/// A type of merging two chains, X and Y. The former chain is split into
/// X1 and X2 and then concatenated with Y in the order specified by the type
/// (e.g., X1_Y_X2 places Y between the two halves of X).
enum class MergeTypeT : int { X_Y, Y_X, X1_Y_X2, Y_X2_X1, X2_X1_Y };
175
176/// The gain of merging two chains, that is, the Ext-TSP score of the merge
177/// together with the corresponding merge 'type' and 'offset'.
178struct MergeGainT {
179 explicit MergeGainT() = default;
180 explicit MergeGainT(double Score, size_t MergeOffset, MergeTypeT MergeType)
181 : Score(Score), MergeOffset(MergeOffset), MergeType(MergeType) {}
182
183 double score() const { return Score; }
184
185 size_t mergeOffset() const { return MergeOffset; }
186
187 MergeTypeT mergeType() const { return MergeType; }
188
189 void setMergeType(MergeTypeT Ty) { MergeType = Ty; }
190
191 // Returns 'true' iff Other is preferred over this.
192 bool operator<(const MergeGainT &Other) const {
193 return (Other.Score > EPS && Other.Score > Score + EPS);
194 }
195
196 // Update the current gain if Other is preferred over this.
197 void updateIfLessThan(const MergeGainT &Other) {
198 if (*this < Other)
199 *this = Other;
200 }
201
202private:
203 double Score{-1.0};
204 size_t MergeOffset{0};
205 MergeTypeT MergeType{MergeTypeT::X_Y};
206};
207
// Forward declarations: the graph types below refer to one another.
struct JumpT;
struct ChainT;
struct ChainEdge;
211
/// A node in the graph, typically corresponding to a basic block in the CFG or
/// a function in the call graph.
struct NodeT {
  NodeT(const NodeT &) = delete;
  NodeT(NodeT &&) = default;
  NodeT &operator=(const NodeT &) = delete;
  NodeT &operator=(NodeT &&) = default;

  explicit NodeT(size_t Index, uint64_t Size, uint64_t EC)
      : Index(Index), Size(Size), ExecutionCount(EC) {}

  // Whether this is the entry node (the node with the original index zero).
  bool isEntry() const { return Index == 0; }

  // The total execution count of outgoing jumps.
  uint64_t outCount() const;

  // The total execution count of incoming jumps.
  uint64_t inCount() const;

  // The original index of the node in graph.
  size_t Index{0};
  // The index of the node in the current chain.
  size_t CurIndex{0};
  // The size of the node in the binary.
  uint64_t Size{0};
  // The execution count of the node in the profile data.
  uint64_t ExecutionCount{0};
  // The current chain of the node.
  ChainT *CurChain{nullptr};
  // The estimated address of the node in the current layout; mutable so it
  // can be updated while scoring candidate layouts via const references.
  mutable uint64_t EstimatedAddr{0};
  // Forced successor of the node in the graph.
  NodeT *ForcedSucc{nullptr};
  // Forced predecessor of the node in the graph.
  NodeT *ForcedPred{nullptr};
  // Outgoing jumps from the node.
  std::vector<JumpT *> OutJumps;
  // Incoming jumps to the node.
  std::vector<JumpT *> InJumps;
};
252
/// An arc in the graph, typically corresponding to a jump between two nodes.
struct JumpT {
  JumpT(const JumpT &) = delete;
  JumpT(JumpT &&) = default;
  JumpT &operator=(const JumpT &) = delete;
  JumpT &operator=(JumpT &&) = default;

  explicit JumpT(NodeT *Source, NodeT *Target, uint64_t ExecutionCount)
      : Source(Source), Target(Target), ExecutionCount(ExecutionCount) {}

  // Source node of the jump.
  NodeT *Source;
  // Target node of the jump.
  NodeT *Target;
  // Execution count of the arc in the profile data.
  uint64_t ExecutionCount{0};
  // Whether the jump corresponds to a conditional branch, that is, whether
  // its source node has more than one outgoing edge.
  bool IsConditional{false};
  // The offset of the jump from the source node.
  uint64_t Offset{0};
};
274
275/// A chain (ordered sequence) of nodes in the graph.
276struct ChainT {
277 ChainT(const ChainT &) = delete;
278 ChainT(ChainT &&) = default;
279 ChainT &operator=(const ChainT &) = delete;
280 ChainT &operator=(ChainT &&) = default;
281
282 explicit ChainT(uint64_t Id, NodeT *Node)
283 : Id(Id), ExecutionCount(Node->ExecutionCount), Size(Node->Size),
284 Nodes(1, Node) {}
285
286 size_t numBlocks() const { return Nodes.size(); }
287
288 double density() const { return static_cast<double>(ExecutionCount) / Size; }
289
290 bool isEntry() const { return Nodes[0]->Index == 0; }
291
292 bool isCold() const {
293 for (NodeT *Node : Nodes) {
294 if (Node->ExecutionCount > 0)
295 return false;
296 }
297 return true;
298 }
299
300 ChainEdge *getEdge(ChainT *Other) const {
301 for (const auto &[Chain, ChainEdge] : Edges) {
302 if (Chain == Other)
303 return ChainEdge;
304 }
305 return nullptr;
306 }
307
308 void removeEdge(ChainT *Other) {
309 auto It = Edges.begin();
310 while (It != Edges.end()) {
311 if (It->first == Other) {
312 Edges.erase(It);
313 return;
314 }
315 It++;
316 }
317 }
318
319 void addEdge(ChainT *Other, ChainEdge *Edge) {
320 Edges.push_back(std::make_pair(Other, Edge));
321 }
322
323 void merge(ChainT *Other, std::vector<NodeT *> MergedBlocks) {
324 Nodes = std::move(MergedBlocks);
325 // Update the chain's data.
326 ExecutionCount += Other->ExecutionCount;
327 Size += Other->Size;
328 Id = Nodes[0]->Index;
329 // Update the node's data.
330 for (size_t Idx = 0; Idx < Nodes.size(); Idx++) {
331 Nodes[Idx]->CurChain = this;
332 Nodes[Idx]->CurIndex = Idx;
333 }
334 }
335
336 void mergeEdges(ChainT *Other);
337
338 void clear() {
339 Nodes.clear();
340 Nodes.shrink_to_fit();
341 Edges.clear();
342 Edges.shrink_to_fit();
343 }
344
345 // Unique chain identifier.
346 uint64_t Id;
347 // Cached ext-tsp score for the chain.
348 double Score{0};
349 // The total execution count of the chain.
350 uint64_t ExecutionCount{0};
351 // The total size of the chain.
352 uint64_t Size{0};
353 // Nodes of the chain.
354 std::vector<NodeT *> Nodes;
355 // Adjacent chains and corresponding edges (lists of jumps).
356 std::vector<std::pair<ChainT *, ChainEdge *>> Edges;
357};
358
/// An edge in the graph representing jumps between two chains.
/// When nodes are merged into chains, the edges are combined too so that
/// there is always at most one edge between a pair of chains.
struct ChainEdge {
  ChainEdge(const ChainEdge &) = delete;
  ChainEdge(ChainEdge &&) = default;
  ChainEdge &operator=(const ChainEdge &) = delete;
  ChainEdge &operator=(ChainEdge &&) = delete;

  // Create an edge from a single jump; the endpoints are the current chains
  // of the jump's source and target nodes.
  explicit ChainEdge(JumpT *Jump)
      : SrcChain(Jump->Source->CurChain), DstChain(Jump->Target->CurChain),
        Jumps(1, Jump) {}

  ChainT *srcChain() const { return SrcChain; }

  ChainT *dstChain() const { return DstChain; }

  bool isSelfEdge() const { return SrcChain == DstChain; }

  // All jumps aggregated by this edge.
  const std::vector<JumpT *> &jumps() const { return Jumps; }

  void appendJump(JumpT *Jump) { Jumps.push_back(Jump); }

  // Take over all jumps of Other, leaving Other empty.
  void moveJumps(ChainEdge *Other) {
    Jumps.insert(Jumps.end(), Other->Jumps.begin(), Other->Jumps.end());
    Other->Jumps.clear();
    Other->Jumps.shrink_to_fit();
  }

  // Replace endpoint From (source and/or destination) by To.
  void changeEndpoint(ChainT *From, ChainT *To) {
    if (From == SrcChain)
      SrcChain = To;
    if (From == DstChain)
      DstChain = To;
  }

  // Whether a cached gain is valid for merging in the direction Src -> Dst.
  bool hasCachedMergeGain(ChainT *Src, ChainT *Dst) const {
    return Src == SrcChain ? CacheValidForward : CacheValidBackward;
  }

  MergeGainT getCachedMergeGain(ChainT *Src, ChainT *Dst) const {
    return Src == SrcChain ? CachedGainForward : CachedGainBackward;
  }

  void setCachedMergeGain(ChainT *Src, ChainT *Dst, MergeGainT MergeGain) {
    if (Src == SrcChain) {
      CachedGainForward = MergeGain;
      CacheValidForward = true;
    } else {
      CachedGainBackward = MergeGain;
      CacheValidBackward = true;
    }
  }

  // Drop both cached gains; used when an adjacent chain changes.
  void invalidateCache() {
    CacheValidForward = false;
    CacheValidBackward = false;
  }

  void setMergeGain(MergeGainT Gain) { CachedGain = Gain; }

  MergeGainT getMergeGain() const { return CachedGain; }

  double gain() const { return CachedGain.score(); }

private:
  // Source chain.
  ChainT *SrcChain{nullptr};
  // Destination chain.
  ChainT *DstChain{nullptr};
  // Original jumps in the binary with corresponding execution counts.
  std::vector<JumpT *> Jumps;
  // Cached gain value for merging the pair of chains.
  MergeGainT CachedGain;

  // Cached gain values for merging the pair of chains. Since the gain of
  // merging (Src, Dst) and (Dst, Src) might be different, we store both
  // values here, together with validity flags for each direction.
  MergeGainT CachedGainForward;
  MergeGainT CachedGainBackward;
  // Whether the corresponding cached value must be recomputed.
  bool CacheValidForward{false};
  bool CacheValidBackward{false};
};
444
445uint64_t NodeT::outCount() const {
446 uint64_t Count = 0;
447 for (JumpT *Jump : OutJumps)
448 Count += Jump->ExecutionCount;
449 return Count;
450}
451
452uint64_t NodeT::inCount() const {
453 uint64_t Count = 0;
454 for (JumpT *Jump : InJumps)
455 Count += Jump->ExecutionCount;
456 return Count;
457}
458
// Merge all edges adjacent to Other into the edges of this chain, keeping the
// invariant that there is at most one edge between any pair of chains.
void ChainT::mergeEdges(ChainT *Other) {
  // Update edges adjacent to chain Other.
  for (const auto &[DstChain, DstEdge] : Other->Edges) {
    // A self-edge of Other becomes a self-edge of the merged chain.
    ChainT *TargetChain = DstChain == Other ? this : DstChain;
    ChainEdge *CurEdge = getEdge(TargetChain);
    if (CurEdge == nullptr) {
      // No edge to TargetChain yet: retarget Other's edge to this chain and
      // register it on the other endpoint as well.
      DstEdge->changeEndpoint(Other, this);
      this->addEdge(TargetChain, DstEdge);
      if (DstChain != this && DstChain != Other)
        DstChain->addEdge(this, DstEdge);
    } else {
      // An edge to TargetChain already exists: fold Other's jumps into it.
      CurEdge->moveJumps(DstEdge);
    }
    // Cleanup leftover edge.
    if (DstChain != Other)
      DstChain->removeEdge(Other);
  }
}
477
478using NodeIter = std::vector<NodeT *>::const_iterator;
479
480/// A wrapper around three chains of nodes; it is used to avoid extra
481/// instantiation of the vectors.
482struct MergedChain {
483 MergedChain(NodeIter Begin1, NodeIter End1, NodeIter Begin2 = NodeIter(),
484 NodeIter End2 = NodeIter(), NodeIter Begin3 = NodeIter(),
485 NodeIter End3 = NodeIter())
486 : Begin1(Begin1), End1(End1), Begin2(Begin2), End2(End2), Begin3(Begin3),
487 End3(End3) {}
488
489 template <typename F> void forEach(const F &Func) const {
490 for (auto It = Begin1; It != End1; It++)
491 Func(*It);
492 for (auto It = Begin2; It != End2; It++)
493 Func(*It);
494 for (auto It = Begin3; It != End3; It++)
495 Func(*It);
496 }
497
498 std::vector<NodeT *> getNodes() const {
499 std::vector<NodeT *> Result;
500 Result.reserve(std::distance(Begin1, End1) + std::distance(Begin2, End2) +
501 std::distance(Begin3, End3));
502 Result.insert(Result.end(), Begin1, End1);
503 Result.insert(Result.end(), Begin2, End2);
504 Result.insert(Result.end(), Begin3, End3);
505 return Result;
506 }
507
508 const NodeT *getFirstNode() const { return *Begin1; }
509
510private:
511 NodeIter Begin1;
512 NodeIter End1;
513 NodeIter Begin2;
514 NodeIter End2;
515 NodeIter Begin3;
516 NodeIter End3;
517};
518
519/// Merge two chains of nodes respecting a given 'type' and 'offset'.
520///
521/// If MergeType == 0, then the result is a concatenation of two chains.
522/// Otherwise, the first chain is cut into two sub-chains at the offset,
523/// and merged using all possible ways of concatenating three chains.
524MergedChain mergeNodes(const std::vector<NodeT *> &X,
525 const std::vector<NodeT *> &Y, size_t MergeOffset,
526 MergeTypeT MergeType) {
527 // Split the first chain, X, into X1 and X2.
528 NodeIter BeginX1 = X.begin();
529 NodeIter EndX1 = X.begin() + MergeOffset;
530 NodeIter BeginX2 = X.begin() + MergeOffset;
531 NodeIter EndX2 = X.end();
532 NodeIter BeginY = Y.begin();
533 NodeIter EndY = Y.end();
534
535 // Construct a new chain from the three existing ones.
536 switch (MergeType) {
537 case MergeTypeT::X_Y:
538 return MergedChain(BeginX1, EndX2, BeginY, EndY);
539 case MergeTypeT::Y_X:
540 return MergedChain(BeginY, EndY, BeginX1, EndX2);
541 case MergeTypeT::X1_Y_X2:
542 return MergedChain(BeginX1, EndX1, BeginY, EndY, BeginX2, EndX2);
543 case MergeTypeT::Y_X2_X1:
544 return MergedChain(BeginY, EndY, BeginX2, EndX2, BeginX1, EndX1);
545 case MergeTypeT::X2_X1_Y:
546 return MergedChain(BeginX2, EndX2, BeginX1, EndX1, BeginY, EndY);
547 }
548 llvm_unreachable("unexpected chain merge type");
549}
550
551/// The implementation of the ExtTSP algorithm.
552class ExtTSPImpl {
553public:
  /// Construct the algorithm state for a CFG described by per-node sizes,
  /// per-node execution counts, and jump (edge) execution counts.
  ExtTSPImpl(ArrayRef<uint64_t> NodeSizes, ArrayRef<uint64_t> NodeCounts,
             ArrayRef<EdgeCount> EdgeCounts)
      : NumNodes(NodeSizes.size()) {
    initialize(NodeSizes, NodeCounts, EdgeCounts);
  }
559
  /// Run the algorithm and return an optimized ordering of nodes.
  std::vector<uint64_t> run() {
    // Pass 1: Merge nodes with their mutually forced successors, so that
    // profile-mandated fallthroughs cannot be broken by later passes.
    mergeForcedPairs();

    // Pass 2: Greedily merge pairs of chains while the ExtTSP objective
    // improves.
    mergeChainPairs();

    // Pass 3: Merge cold nodes to reduce code size.
    mergeColdChains();

    // Collect nodes from all chains in the final order.
    return concatChains();
  }
574
575private:
576 /// Initialize the algorithm's data structures.
577 void initialize(const ArrayRef<uint64_t> &NodeSizes,
578 const ArrayRef<uint64_t> &NodeCounts,
579 const ArrayRef<EdgeCount> &EdgeCounts) {
580 // Initialize nodes
581 AllNodes.reserve(NumNodes);
582 for (uint64_t Idx = 0; Idx < NumNodes; Idx++) {
583 uint64_t Size = std::max<uint64_t>(NodeSizes[Idx], 1ULL);
584 uint64_t ExecutionCount = NodeCounts[Idx];
585 // The execution count of the entry node is set to at least one.
586 if (Idx == 0 && ExecutionCount == 0)
587 ExecutionCount = 1;
588 AllNodes.emplace_back(Idx, Size, ExecutionCount);
589 }
590
591 // Initialize jumps between nodes
592 SuccNodes.resize(NumNodes);
593 PredNodes.resize(NumNodes);
594 std::vector<uint64_t> OutDegree(NumNodes, 0);
595 AllJumps.reserve(EdgeCounts.size());
596 for (auto Edge : EdgeCounts) {
597 ++OutDegree[Edge.src];
598 // Ignore self-edges.
599 if (Edge.src == Edge.dst)
600 continue;
601
602 SuccNodes[Edge.src].push_back(Edge.dst);
603 PredNodes[Edge.dst].push_back(Edge.src);
604 if (Edge.count > 0) {
605 NodeT &PredNode = AllNodes[Edge.src];
606 NodeT &SuccNode = AllNodes[Edge.dst];
607 AllJumps.emplace_back(&PredNode, &SuccNode, Edge.count);
608 SuccNode.InJumps.push_back(&AllJumps.back());
609 PredNode.OutJumps.push_back(&AllJumps.back());
610 }
611 }
612 for (JumpT &Jump : AllJumps) {
613 assert(OutDegree[Jump.Source->Index] > 0);
614 Jump.IsConditional = OutDegree[Jump.Source->Index] > 1;
615 }
616
617 // Initialize chains.
618 AllChains.reserve(NumNodes);
619 HotChains.reserve(NumNodes);
620 for (NodeT &Node : AllNodes) {
621 AllChains.emplace_back(Node.Index, &Node);
622 Node.CurChain = &AllChains.back();
623 if (Node.ExecutionCount > 0)
624 HotChains.push_back(&AllChains.back());
625 }
626
627 // Initialize chain edges.
628 AllEdges.reserve(AllJumps.size());
629 for (NodeT &PredNode : AllNodes) {
630 for (JumpT *Jump : PredNode.OutJumps) {
631 NodeT *SuccNode = Jump->Target;
632 ChainEdge *CurEdge = PredNode.CurChain->getEdge(SuccNode->CurChain);
633 // this edge is already present in the graph.
634 if (CurEdge != nullptr) {
635 assert(SuccNode->CurChain->getEdge(PredNode.CurChain) != nullptr);
636 CurEdge->appendJump(Jump);
637 continue;
638 }
639 // this is a new edge.
640 AllEdges.emplace_back(Jump);
641 PredNode.CurChain->addEdge(SuccNode->CurChain, &AllEdges.back());
642 SuccNode->CurChain->addEdge(PredNode.CurChain, &AllEdges.back());
643 }
644 }
645 }
646
  /// For a pair of nodes, A and B, node B is the forced successor of A,
  /// if (i) all jumps (based on profile) from A goes to B and (ii) all jumps
  /// to B are from A. Such nodes should be adjacent in the optimal ordering;
  /// the method finds and merges such pairs of nodes.
  void mergeForcedPairs() {
    // Find fallthroughs based on edge weights: B is forced iff A has a unique
    // successor B, B has a unique predecessor A, and B is not the entry node.
    for (NodeT &Node : AllNodes) {
      if (SuccNodes[Node.Index].size() == 1 &&
          PredNodes[SuccNodes[Node.Index][0]].size() == 1 &&
          SuccNodes[Node.Index][0] != 0) {
        size_t SuccIndex = SuccNodes[Node.Index][0];
        Node.ForcedSucc = &AllNodes[SuccIndex];
        AllNodes[SuccIndex].ForcedPred = &Node;
      }
    }

    // There might be 'cycles' in the forced dependencies, since profile
    // data isn't 100% accurate. Typically this is observed in loops, when the
    // loop edges are the hottest successors for the basic blocks of the loop.
    // Break the cycles by choosing the node with the smallest index as the
    // head. This helps to keep the original order of the loops, which likely
    // have already been rotated in the optimized manner.
    for (NodeT &Node : AllNodes) {
      if (Node.ForcedSucc == nullptr || Node.ForcedPred == nullptr)
        continue;

      // Follow forced-successor links; if the walk returns to Node, it lies
      // on a cycle.
      NodeT *SuccNode = Node.ForcedSucc;
      while (SuccNode != nullptr && SuccNode != &Node) {
        SuccNode = SuccNode->ForcedSucc;
      }
      if (SuccNode == nullptr)
        continue;
      // Break the cycle by cutting the incoming forced link of Node.
      AllNodes[Node.ForcedPred->Index].ForcedSucc = nullptr;
      Node.ForcedPred = nullptr;
    }

    // Merge nodes with their fallthrough successors, starting only from the
    // head of each forced path (a node without a forced predecessor).
    for (NodeT &Node : AllNodes) {
      if (Node.ForcedPred == nullptr && Node.ForcedSucc != nullptr) {
        const NodeT *CurBlock = &Node;
        while (CurBlock->ForcedSucc != nullptr) {
          const NodeT *NextBlock = CurBlock->ForcedSucc;
          mergeChains(Node.CurChain, NextBlock->CurChain, 0, MergeTypeT::X_Y);
          CurBlock = NextBlock;
        }
      }
    }
  }
696
  /// Merge pairs of chains while improving the ExtTSP objective.
  void mergeChainPairs() {
    /// Deterministically compare pairs of chains so that the result does not
    /// depend on the traversal order of HotChains.
    auto compareChainPairs = [](const ChainT *A1, const ChainT *B1,
                                const ChainT *A2, const ChainT *B2) {
      if (A1 != A2)
        return A1->Id < A2->Id;
      return B1->Id < B2->Id;
    };

    while (HotChains.size() > 1) {
      ChainT *BestChainPred = nullptr;
      ChainT *BestChainSucc = nullptr;
      MergeGainT BestGain;
      // Iterate over all pairs of chains.
      for (ChainT *ChainPred : HotChains) {
        // Get candidates for merging with the current chain.
        for (const auto &[ChainSucc, Edge] : ChainPred->Edges) {
          // Ignore loop edges.
          if (ChainPred == ChainSucc)
            continue;

          // Stop early if the combined chain violates the maximum allowed size.
          if (ChainPred->numBlocks() + ChainSucc->numBlocks() >= MaxChainSize)
            continue;

          // Compute the gain of merging the two chains.
          MergeGainT CurGain = getBestMergeGain(ChainPred, ChainSucc, Edge);
          if (CurGain.score() <= EPS)
            continue;

          // Prefer the largest gain; break (near-)ties deterministically by
          // chain identifiers.
          if (BestGain < CurGain ||
              (std::abs(CurGain.score() - BestGain.score()) < EPS &&
               compareChainPairs(ChainPred, ChainSucc, BestChainPred,
                                 BestChainSucc))) {
            BestGain = CurGain;
            BestChainPred = ChainPred;
            BestChainSucc = ChainSucc;
          }
        }
      }

      // Stop merging when there is no improvement.
      if (BestGain.score() <= EPS)
        break;

      // Merge the best pair of chains.
      mergeChains(BestChainPred, BestChainSucc, BestGain.mergeOffset(),
                  BestGain.mergeType());
    }
  }
748
749 /// Merge remaining nodes into chains w/o taking jump counts into
750 /// consideration. This allows to maintain the original node order in the
751 /// absence of profile data.
752 void mergeColdChains() {
753 for (size_t SrcBB = 0; SrcBB < NumNodes; SrcBB++) {
754 // Iterating in reverse order to make sure original fallthrough jumps are
755 // merged first; this might be beneficial for code size.
756 size_t NumSuccs = SuccNodes[SrcBB].size();
757 for (size_t Idx = 0; Idx < NumSuccs; Idx++) {
758 size_t DstBB = SuccNodes[SrcBB][NumSuccs - Idx - 1];
759 ChainT *SrcChain = AllNodes[SrcBB].CurChain;
760 ChainT *DstChain = AllNodes[DstBB].CurChain;
761 if (SrcChain != DstChain && !DstChain->isEntry() &&
762 SrcChain->Nodes.back()->Index == SrcBB &&
763 DstChain->Nodes.front()->Index == DstBB &&
764 SrcChain->isCold() == DstChain->isCold()) {
765 mergeChains(SrcChain, DstChain, 0, MergeTypeT::X_Y);
766 }
767 }
768 }
769 }
770
771 /// Compute the Ext-TSP score for a given node order and a list of jumps.
772 double extTSPScore(const MergedChain &MergedBlocks,
773 const std::vector<JumpT *> &Jumps) const {
774 if (Jumps.empty())
775 return 0.0;
776 uint64_t CurAddr = 0;
777 MergedBlocks.forEach([&](const NodeT *Node) {
778 Node->EstimatedAddr = CurAddr;
779 CurAddr += Node->Size;
780 });
781
782 double Score = 0;
783 for (JumpT *Jump : Jumps) {
784 const NodeT *SrcBlock = Jump->Source;
785 const NodeT *DstBlock = Jump->Target;
786 Score += ::extTSPScore(SrcBlock->EstimatedAddr, SrcBlock->Size,
787 DstBlock->EstimatedAddr, Jump->ExecutionCount,
788 Jump->IsConditional);
789 }
790 return Score;
791 }
792
793 /// Compute the gain of merging two chains.
794 ///
795 /// The function considers all possible ways of merging two chains and
796 /// computes the one having the largest increase in ExtTSP objective. The
797 /// result is a pair with the first element being the gain and the second
798 /// element being the corresponding merging type.
799 MergeGainT getBestMergeGain(ChainT *ChainPred, ChainT *ChainSucc,
800 ChainEdge *Edge) const {
801 if (Edge->hasCachedMergeGain(ChainPred, ChainSucc)) {
802 return Edge->getCachedMergeGain(ChainPred, ChainSucc);
803 }
804
805 // Precompute jumps between ChainPred and ChainSucc.
806 auto Jumps = Edge->jumps();
807 ChainEdge *EdgePP = ChainPred->getEdge(ChainPred);
808 if (EdgePP != nullptr) {
809 Jumps.insert(Jumps.end(), EdgePP->jumps().begin(), EdgePP->jumps().end());
810 }
811 assert(!Jumps.empty() && "trying to merge chains w/o jumps");
812
813 // This object holds the best chosen gain of merging two chains.
814 MergeGainT Gain = MergeGainT();
815
816 /// Given a merge offset and a list of merge types, try to merge two chains
817 /// and update Gain with a better alternative.
818 auto tryChainMerging = [&](size_t Offset,
819 const std::vector<MergeTypeT> &MergeTypes) {
820 // Skip merging corresponding to concatenation w/o splitting.
821 if (Offset == 0 || Offset == ChainPred->Nodes.size())
822 return;
823 // Skip merging if it breaks Forced successors.
824 NodeT *Node = ChainPred->Nodes[Offset - 1];
825 if (Node->ForcedSucc != nullptr)
826 return;
827 // Apply the merge, compute the corresponding gain, and update the best
828 // value, if the merge is beneficial.
829 for (const MergeTypeT &MergeType : MergeTypes) {
830 Gain.updateIfLessThan(
831 computeMergeGain(ChainPred, ChainSucc, Jumps, Offset, MergeType));
832 }
833 };
834
835 // Try to concatenate two chains w/o splitting.
836 Gain.updateIfLessThan(
837 computeMergeGain(ChainPred, ChainSucc, Jumps, 0, MergeTypeT::X_Y));
838
840 // Attach (a part of) ChainPred before the first node of ChainSucc.
841 for (JumpT *Jump : ChainSucc->Nodes.front()->InJumps) {
842 const NodeT *SrcBlock = Jump->Source;
843 if (SrcBlock->CurChain != ChainPred)
844 continue;
845 size_t Offset = SrcBlock->CurIndex + 1;
846 tryChainMerging(Offset, {MergeTypeT::X1_Y_X2, MergeTypeT::X2_X1_Y});
847 }
848
849 // Attach (a part of) ChainPred after the last node of ChainSucc.
850 for (JumpT *Jump : ChainSucc->Nodes.back()->OutJumps) {
851 const NodeT *DstBlock = Jump->Target;
852 if (DstBlock->CurChain != ChainPred)
853 continue;
854 size_t Offset = DstBlock->CurIndex;
855 tryChainMerging(Offset, {MergeTypeT::X1_Y_X2, MergeTypeT::Y_X2_X1});
856 }
857 }
858
859 // Try to break ChainPred in various ways and concatenate with ChainSucc.
860 if (ChainPred->Nodes.size() <= ChainSplitThreshold) {
861 for (size_t Offset = 1; Offset < ChainPred->Nodes.size(); Offset++) {
862 // Try to split the chain in different ways. In practice, applying
863 // X2_Y_X1 merging is almost never provides benefits; thus, we exclude
864 // it from consideration to reduce the search space.
865 tryChainMerging(Offset, {MergeTypeT::X1_Y_X2, MergeTypeT::Y_X2_X1,
866 MergeTypeT::X2_X1_Y});
867 }
868 }
869 Edge->setCachedMergeGain(ChainPred, ChainSucc, Gain);
870 return Gain;
871 }
872
873 /// Compute the score gain of merging two chains, respecting a given
874 /// merge 'type' and 'offset'.
875 ///
876 /// The two chains are not modified in the method.
877 MergeGainT computeMergeGain(const ChainT *ChainPred, const ChainT *ChainSucc,
878 const std::vector<JumpT *> &Jumps,
879 size_t MergeOffset, MergeTypeT MergeType) const {
880 auto MergedBlocks =
881 mergeNodes(ChainPred->Nodes, ChainSucc->Nodes, MergeOffset, MergeType);
882
883 // Do not allow a merge that does not preserve the original entry point.
884 if ((ChainPred->isEntry() || ChainSucc->isEntry()) &&
885 !MergedBlocks.getFirstNode()->isEntry())
886 return MergeGainT();
887
888 // The gain for the new chain.
889 auto NewGainScore = extTSPScore(MergedBlocks, Jumps) - ChainPred->Score;
890 return MergeGainT(NewGainScore, MergeOffset, MergeType);
891 }
892
  /// Merge chain From into chain Into, update the list of active chains,
  /// adjacency information, and the corresponding cached values.
  void mergeChains(ChainT *Into, ChainT *From, size_t MergeOffset,
                   MergeTypeT MergeType) {
    assert(Into != From && "a chain cannot be merged with itself");

    // Merge the nodes.
    MergedChain MergedNodes =
        mergeNodes(Into->Nodes, From->Nodes, MergeOffset, MergeType);
    Into->merge(From, MergedNodes.getNodes());

    // Merge the edges; From is emptied afterwards.
    Into->mergeEdges(From);
    From->clear();

    // Update cached ext-tsp score for the new chain; only jumps within the
    // chain (its self-edge) contribute to the score.
    ChainEdge *SelfEdge = Into->getEdge(Into);
    if (SelfEdge != nullptr) {
      MergedNodes = MergedChain(Into->Nodes.begin(), Into->Nodes.end());
      Into->Score = extTSPScore(MergedNodes, SelfEdge->jumps());
    }

    // Remove the chain from the list of active chains.
    llvm::erase_value(HotChains, From);

    // Invalidate caches: merge gains of edges adjacent to the new chain are
    // no longer up-to-date.
    for (auto EdgeIt : Into->Edges)
      EdgeIt.second->invalidateCache();
  }
922
923 /// Concatenate all chains into the final order.
924 std::vector<uint64_t> concatChains() {
925 // Collect chains and calculate density stats for their sorting.
926 std::vector<const ChainT *> SortedChains;
928 for (ChainT &Chain : AllChains) {
929 if (!Chain.Nodes.empty()) {
930 SortedChains.push_back(&Chain);
931 // Using doubles to avoid overflow of ExecutionCounts.
932 double Size = 0;
933 double ExecutionCount = 0;
934 for (NodeT *Node : Chain.Nodes) {
935 Size += static_cast<double>(Node->Size);
936 ExecutionCount += static_cast<double>(Node->ExecutionCount);
937 }
938 assert(Size > 0 && "a chain of zero size");
939 ChainDensity[&Chain] = ExecutionCount / Size;
940 }
941 }
942
943 // Sorting chains by density in the decreasing order.
944 std::sort(SortedChains.begin(), SortedChains.end(),
945 [&](const ChainT *L, const ChainT *R) {
946 // Place the entry point is at the beginning of the order.
947 if (L->isEntry() != R->isEntry())
948 return L->isEntry();
949
950 const double DL = ChainDensity[L];
951 const double DR = ChainDensity[R];
952 // Compare by density and break ties by chain identifiers.
953 return std::make_tuple(-DL, L->Id) <
954 std::make_tuple(-DR, R->Id);
955 });
956
957 // Collect the nodes in the order specified by their chains.
958 std::vector<uint64_t> Order;
959 Order.reserve(NumNodes);
960 for (const ChainT *Chain : SortedChains)
961 for (NodeT *Node : Chain->Nodes)
962 Order.push_back(Node->Index);
963 return Order;
964 }
965
966private:
967 /// The number of nodes in the graph.
968 const size_t NumNodes;
969
970 /// Successors of each node.
971 std::vector<std::vector<uint64_t>> SuccNodes;
972
973 /// Predecessors of each node.
974 std::vector<std::vector<uint64_t>> PredNodes;
975
976 /// All nodes (basic blocks) in the graph.
977 std::vector<NodeT> AllNodes;
978
979 /// All jumps between the nodes.
980 std::vector<JumpT> AllJumps;
981
982 /// All chains of nodes.
983 std::vector<ChainT> AllChains;
984
985 /// All edges between the chains.
986 std::vector<ChainEdge> AllEdges;
987
988 /// Active chains. The vector gets updated at runtime when chains are merged.
989 std::vector<ChainT *> HotChains;
990};
991
992/// The implementation of the Cache-Directed Sort (CDS) algorithm for ordering
993/// functions represented by a call graph.
994class CDSortImpl {
995public:
996 CDSortImpl(const CDSortConfig &Config, ArrayRef<uint64_t> NodeSizes,
997 ArrayRef<uint64_t> NodeCounts, ArrayRef<EdgeCount> EdgeCounts,
998 ArrayRef<uint64_t> EdgeOffsets)
999 : Config(Config), NumNodes(NodeSizes.size()) {
1000 initialize(NodeSizes, NodeCounts, EdgeCounts, EdgeOffsets);
1001 }
1002
1003 /// Run the algorithm and return an ordered set of function clusters.
1004 std::vector<uint64_t> run() {
1005 // Merge pairs of chains while improving the objective.
1006 mergeChainPairs();
1007
1008 LLVM_DEBUG(dbgs() << "Cache-directed function sorting reduced the number"
1009 << " of chains from " << NumNodes << " to "
1010 << HotChains.size() << "\n");
1011
1012 // Collect nodes from all the chains.
1013 return concatChains();
1014 }
1015
1016private:
1017 /// Initialize the algorithm's data structures.
1018 void initialize(const ArrayRef<uint64_t> &NodeSizes,
1019 const ArrayRef<uint64_t> &NodeCounts,
1020 const ArrayRef<EdgeCount> &EdgeCounts,
1021 const ArrayRef<uint64_t> &EdgeOffsets) {
1022 // Initialize nodes.
1023 AllNodes.reserve(NumNodes);
1024 for (uint64_t Node = 0; Node < NumNodes; Node++) {
1025 uint64_t Size = std::max<uint64_t>(NodeSizes[Node], 1ULL);
1026 uint64_t ExecutionCount = NodeCounts[Node];
1027 AllNodes.emplace_back(Node, Size, ExecutionCount);
1028 TotalSamples += ExecutionCount;
1029 if (ExecutionCount > 0)
1030 TotalSize += Size;
1031 }
1032
1033 // Initialize jumps between the nodes.
1034 SuccNodes.resize(NumNodes);
1035 PredNodes.resize(NumNodes);
1036 AllJumps.reserve(EdgeCounts.size());
1037 for (size_t I = 0; I < EdgeCounts.size(); I++) {
1038 auto [Pred, Succ, Count] = EdgeCounts[I];
1039 // Ignore recursive calls.
1040 if (Pred == Succ)
1041 continue;
1042
1043 SuccNodes[Pred].push_back(Succ);
1044 PredNodes[Succ].push_back(Pred);
1045 if (Count > 0) {
1046 NodeT &PredNode = AllNodes[Pred];
1047 NodeT &SuccNode = AllNodes[Succ];
1048 AllJumps.emplace_back(&PredNode, &SuccNode, Count);
1049 AllJumps.back().Offset = EdgeOffsets[I];
1050 SuccNode.InJumps.push_back(&AllJumps.back());
1051 PredNode.OutJumps.push_back(&AllJumps.back());
1052 }
1053 }
1054
1055 // Initialize chains.
1056 AllChains.reserve(NumNodes);
1057 HotChains.reserve(NumNodes);
1058 for (NodeT &Node : AllNodes) {
1059 // Adjust execution counts.
1060 Node.ExecutionCount = std::max(Node.ExecutionCount, Node.inCount());
1061 Node.ExecutionCount = std::max(Node.ExecutionCount, Node.outCount());
1062 // Create chain.
1063 AllChains.emplace_back(Node.Index, &Node);
1064 Node.CurChain = &AllChains.back();
1065 if (Node.ExecutionCount > 0)
1066 HotChains.push_back(&AllChains.back());
1067 }
1068
1069 // Initialize chain edges.
1070 AllEdges.reserve(AllJumps.size());
1071 for (NodeT &PredNode : AllNodes) {
1072 for (JumpT *Jump : PredNode.OutJumps) {
1073 NodeT *SuccNode = Jump->Target;
1074 ChainEdge *CurEdge = PredNode.CurChain->getEdge(SuccNode->CurChain);
1075 // this edge is already present in the graph.
1076 if (CurEdge != nullptr) {
1077 assert(SuccNode->CurChain->getEdge(PredNode.CurChain) != nullptr);
1078 CurEdge->appendJump(Jump);
1079 continue;
1080 }
1081 // this is a new edge.
1082 AllEdges.emplace_back(Jump);
1083 PredNode.CurChain->addEdge(SuccNode->CurChain, &AllEdges.back());
1084 SuccNode->CurChain->addEdge(PredNode.CurChain, &AllEdges.back());
1085 }
1086 }
1087 }
1088
1089 /// Merge pairs of chains while there is an improvement in the objective.
1090 void mergeChainPairs() {
1091 // Create a priority queue containing all edges ordered by the merge gain.
1092 auto GainComparator = [](ChainEdge *L, ChainEdge *R) {
1093 return std::make_tuple(-L->gain(), L->srcChain()->Id, L->dstChain()->Id) <
1094 std::make_tuple(-R->gain(), R->srcChain()->Id, R->dstChain()->Id);
1095 };
1096 std::set<ChainEdge *, decltype(GainComparator)> Queue(GainComparator);
1097
1098 // Insert the edges into the queue.
1099 for (ChainT *ChainPred : HotChains) {
1100 for (const auto &[_, Edge] : ChainPred->Edges) {
1101 // Ignore self-edges.
1102 if (Edge->isSelfEdge())
1103 continue;
1104 // Ignore already processed edges.
1105 if (Edge->gain() != -1.0)
1106 continue;
1107
1108 // Compute the gain of merging the two chains.
1109 MergeGainT Gain = getBestMergeGain(Edge);
1110 Edge->setMergeGain(Gain);
1111
1112 if (Edge->gain() > EPS)
1113 Queue.insert(Edge);
1114 }
1115 }
1116
1117 // Merge the chains while the gain of merging is positive.
1118 while (!Queue.empty()) {
1119 // Extract the best (top) edge for merging.
1120 ChainEdge *BestEdge = *Queue.begin();
1121 Queue.erase(Queue.begin());
1122 // Ignore self-edges.
1123 if (BestEdge->isSelfEdge())
1124 continue;
1125 // Ignore edges with non-positive gains.
1126 if (BestEdge->gain() <= EPS)
1127 continue;
1128
1129 ChainT *BestSrcChain = BestEdge->srcChain();
1130 ChainT *BestDstChain = BestEdge->dstChain();
1131
1132 // Remove outdated edges from the queue.
1133 for (const auto &[_, ChainEdge] : BestSrcChain->Edges)
1134 Queue.erase(ChainEdge);
1135 for (const auto &[_, ChainEdge] : BestDstChain->Edges)
1136 Queue.erase(ChainEdge);
1137
1138 // Merge the best pair of chains.
1139 MergeGainT BestGain = BestEdge->getMergeGain();
1140 mergeChains(BestSrcChain, BestDstChain, BestGain.mergeOffset(),
1141 BestGain.mergeType());
1142
1143 // Insert newly created edges into the queue.
1144 for (const auto &[_, Edge] : BestSrcChain->Edges) {
1145 // Ignore loop edges.
1146 if (Edge->isSelfEdge())
1147 continue;
1148
1149 // Compute the gain of merging the two chains.
1150 MergeGainT Gain = getBestMergeGain(Edge);
1151 Edge->setMergeGain(Gain);
1152
1153 if (Edge->gain() > EPS)
1154 Queue.insert(Edge);
1155 }
1156 }
1157 }
1158
1159 /// Compute the gain of merging two chains.
1160 ///
1161 /// The function considers all possible ways of merging two chains and
1162 /// computes the one having the largest increase in ExtTSP objective. The
1163 /// result is a pair with the first element being the gain and the second
1164 /// element being the corresponding merging type.
1165 MergeGainT getBestMergeGain(ChainEdge *Edge) const {
1166 // Precompute jumps between ChainPred and ChainSucc.
1167 auto Jumps = Edge->jumps();
1168 assert(!Jumps.empty() && "trying to merge chains w/o jumps");
1169 ChainT *SrcChain = Edge->srcChain();
1170 ChainT *DstChain = Edge->dstChain();
1171
1172 // This object holds the best currently chosen gain of merging two chains.
1173 MergeGainT Gain = MergeGainT();
1174
1175 /// Given a list of merge types, try to merge two chains and update Gain
1176 /// with a better alternative.
1177 auto tryChainMerging = [&](const std::vector<MergeTypeT> &MergeTypes) {
1178 // Apply the merge, compute the corresponding gain, and update the best
1179 // value, if the merge is beneficial.
1180 for (const MergeTypeT &MergeType : MergeTypes) {
1181 MergeGainT NewGain =
1182 computeMergeGain(SrcChain, DstChain, Jumps, MergeType);
1183
1184 // When forward and backward gains are the same, prioritize merging that
1185 // preserves the original order of the functions in the binary.
1186 if (std::abs(Gain.score() - NewGain.score()) < EPS) {
1187 if ((MergeType == MergeTypeT::X_Y && SrcChain->Id < DstChain->Id) ||
1188 (MergeType == MergeTypeT::Y_X && SrcChain->Id > DstChain->Id)) {
1189 Gain = NewGain;
1190 }
1191 } else if (NewGain.score() > Gain.score() + EPS) {
1192 Gain = NewGain;
1193 }
1194 }
1195 };
1196
1197 // Try to concatenate two chains w/o splitting.
1198 tryChainMerging({MergeTypeT::X_Y, MergeTypeT::Y_X});
1199
1200 return Gain;
1201 }
1202
1203 /// Compute the score gain of merging two chains, respecting a given type.
1204 ///
1205 /// The two chains are not modified in the method.
1206 MergeGainT computeMergeGain(ChainT *ChainPred, ChainT *ChainSucc,
1207 const std::vector<JumpT *> &Jumps,
1208 MergeTypeT MergeType) const {
1209 // This doesn't depend on the ordering of the nodes
1210 double FreqGain = freqBasedLocalityGain(ChainPred, ChainSucc);
1211
1212 // Merge offset is always 0, as the chains are not split.
1213 size_t MergeOffset = 0;
1214 auto MergedBlocks =
1215 mergeNodes(ChainPred->Nodes, ChainSucc->Nodes, MergeOffset, MergeType);
1216 double DistGain = distBasedLocalityGain(MergedBlocks, Jumps);
1217
1218 double GainScore = DistGain + Config.FrequencyScale * FreqGain;
1219 // Scale the result to increase the importance of merging short chains.
1220 if (GainScore >= 0.0)
1221 GainScore /= std::min(ChainPred->Size, ChainSucc->Size);
1222
1223 return MergeGainT(GainScore, MergeOffset, MergeType);
1224 }
1225
1226 /// Compute the change of the frequency locality after merging the chains.
1227 double freqBasedLocalityGain(ChainT *ChainPred, ChainT *ChainSucc) const {
1228 auto missProbability = [&](double ChainDensity) {
1229 double PageSamples = ChainDensity * Config.CacheSize;
1230 if (PageSamples >= TotalSamples)
1231 return 0.0;
1232 double P = PageSamples / TotalSamples;
1233 return pow(1.0 - P, static_cast<double>(Config.CacheEntries));
1234 };
1235
1236 // Cache misses on the chains before merging.
1237 double CurScore =
1238 ChainPred->ExecutionCount * missProbability(ChainPred->density()) +
1239 ChainSucc->ExecutionCount * missProbability(ChainSucc->density());
1240
1241 // Cache misses on the merged chain
1242 double MergedCounts = ChainPred->ExecutionCount + ChainSucc->ExecutionCount;
1243 double MergedSize = ChainPred->Size + ChainSucc->Size;
1244 double MergedDensity = static_cast<double>(MergedCounts) / MergedSize;
1245 double NewScore = MergedCounts * missProbability(MergedDensity);
1246
1247 return CurScore - NewScore;
1248 }
1249
1250 /// Compute the distance locality for a jump / call.
1251 double distScore(uint64_t SrcAddr, uint64_t DstAddr, uint64_t Count) const {
1252 uint64_t Dist = SrcAddr <= DstAddr ? DstAddr - SrcAddr : SrcAddr - DstAddr;
1253 double D = Dist == 0 ? 0.1 : static_cast<double>(Dist);
1254 return static_cast<double>(Count) * std::pow(D, -Config.DistancePower);
1255 }
1256
1257 /// Compute the change of the distance locality after merging the chains.
1258 double distBasedLocalityGain(const MergedChain &MergedBlocks,
1259 const std::vector<JumpT *> &Jumps) const {
1260 if (Jumps.empty())
1261 return 0.0;
1262 uint64_t CurAddr = 0;
1263 MergedBlocks.forEach([&](const NodeT *Node) {
1264 Node->EstimatedAddr = CurAddr;
1265 CurAddr += Node->Size;
1266 });
1267
1268 double CurScore = 0;
1269 double NewScore = 0;
1270 for (const JumpT *Arc : Jumps) {
1271 uint64_t SrcAddr = Arc->Source->EstimatedAddr + Arc->Offset;
1272 uint64_t DstAddr = Arc->Target->EstimatedAddr;
1273 NewScore += distScore(SrcAddr, DstAddr, Arc->ExecutionCount);
1274 CurScore += distScore(0, TotalSize, Arc->ExecutionCount);
1275 }
1276 return NewScore - CurScore;
1277 }
1278
1279 /// Merge chain From into chain Into, update the list of active chains,
1280 /// adjacency information, and the corresponding cached values.
1281 void mergeChains(ChainT *Into, ChainT *From, size_t MergeOffset,
1282 MergeTypeT MergeType) {
1283 assert(Into != From && "a chain cannot be merged with itself");
1284
1285 // Merge the nodes.
1286 MergedChain MergedNodes =
1287 mergeNodes(Into->Nodes, From->Nodes, MergeOffset, MergeType);
1288 Into->merge(From, MergedNodes.getNodes());
1289
1290 // Merge the edges.
1291 Into->mergeEdges(From);
1292 From->clear();
1293
1294 // Remove the chain from the list of active chains.
1295 llvm::erase_value(HotChains, From);
1296 }
1297
1298 /// Concatenate all chains into the final order.
1299 std::vector<uint64_t> concatChains() {
1300 // Collect chains and calculate density stats for their sorting.
1301 std::vector<const ChainT *> SortedChains;
1303 for (ChainT &Chain : AllChains) {
1304 if (!Chain.Nodes.empty()) {
1305 SortedChains.push_back(&Chain);
1306 // Using doubles to avoid overflow of ExecutionCounts.
1307 double Size = 0;
1308 double ExecutionCount = 0;
1309 for (NodeT *Node : Chain.Nodes) {
1310 Size += static_cast<double>(Node->Size);
1311 ExecutionCount += static_cast<double>(Node->ExecutionCount);
1312 }
1313 assert(Size > 0 && "a chain of zero size");
1314 ChainDensity[&Chain] = ExecutionCount / Size;
1315 }
1316 }
1317
1318 // Sort chains by density in the decreasing order.
1319 std::sort(SortedChains.begin(), SortedChains.end(),
1320 [&](const ChainT *L, const ChainT *R) {
1321 const double DL = ChainDensity[L];
1322 const double DR = ChainDensity[R];
1323 // Compare by density and break ties by chain identifiers.
1324 return std::make_tuple(-DL, L->Id) <
1325 std::make_tuple(-DR, R->Id);
1326 });
1327
1328 // Collect the nodes in the order specified by their chains.
1329 std::vector<uint64_t> Order;
1330 Order.reserve(NumNodes);
1331 for (const ChainT *Chain : SortedChains)
1332 for (NodeT *Node : Chain->Nodes)
1333 Order.push_back(Node->Index);
1334 return Order;
1335 }
1336
1337private:
1338 /// Config for the algorithm.
1339 const CDSortConfig Config;
1340
1341 /// The number of nodes in the graph.
1342 const size_t NumNodes;
1343
1344 /// Successors of each node.
1345 std::vector<std::vector<uint64_t>> SuccNodes;
1346
1347 /// Predecessors of each node.
1348 std::vector<std::vector<uint64_t>> PredNodes;
1349
1350 /// All nodes (functions) in the graph.
1351 std::vector<NodeT> AllNodes;
1352
1353 /// All jumps (function calls) between the nodes.
1354 std::vector<JumpT> AllJumps;
1355
1356 /// All chains of nodes.
1357 std::vector<ChainT> AllChains;
1358
1359 /// All edges between the chains.
1360 std::vector<ChainEdge> AllEdges;
1361
1362 /// Active chains. The vector gets updated at runtime when chains are merged.
1363 std::vector<ChainT *> HotChains;
1364
1365 /// The total number of samples in the graph.
1366 uint64_t TotalSamples{0};
1367
1368 /// The total size of the nodes in the graph.
1369 uint64_t TotalSize{0};
1370};
1371
1372} // end of anonymous namespace
1373
1374std::vector<uint64_t>
1376 ArrayRef<uint64_t> NodeCounts,
1377 ArrayRef<EdgeCount> EdgeCounts) {
1378 // Verify correctness of the input data.
1379 assert(NodeCounts.size() == NodeSizes.size() && "Incorrect input");
1380 assert(NodeSizes.size() > 2 && "Incorrect input");
1381
1382 // Apply the reordering algorithm.
1383 ExtTSPImpl Alg(NodeSizes, NodeCounts, EdgeCounts);
1384 std::vector<uint64_t> Result = Alg.run();
1385
1386 // Verify correctness of the output.
1387 assert(Result.front() == 0 && "Original entry point is not preserved");
1388 assert(Result.size() == NodeSizes.size() && "Incorrect size of layout");
1389 return Result;
1390}
1391
1393 ArrayRef<uint64_t> NodeSizes,
1394 ArrayRef<uint64_t> NodeCounts,
1395 ArrayRef<EdgeCount> EdgeCounts) {
1396 // Estimate addresses of the blocks in memory.
1397 std::vector<uint64_t> Addr(NodeSizes.size(), 0);
1398 for (size_t Idx = 1; Idx < Order.size(); Idx++) {
1399 Addr[Order[Idx]] = Addr[Order[Idx - 1]] + NodeSizes[Order[Idx - 1]];
1400 }
1401 std::vector<uint64_t> OutDegree(NodeSizes.size(), 0);
1402 for (auto Edge : EdgeCounts)
1403 ++OutDegree[Edge.src];
1404
1405 // Increase the score for each jump.
1406 double Score = 0;
1407 for (auto Edge : EdgeCounts) {
1408 bool IsConditional = OutDegree[Edge.src] > 1;
1409 Score += ::extTSPScore(Addr[Edge.src], NodeSizes[Edge.src], Addr[Edge.dst],
1410 Edge.count, IsConditional);
1411 }
1412 return Score;
1413}
1414
1416 ArrayRef<uint64_t> NodeCounts,
1417 ArrayRef<EdgeCount> EdgeCounts) {
1418 std::vector<uint64_t> Order(NodeSizes.size());
1419 for (size_t Idx = 0; Idx < NodeSizes.size(); Idx++) {
1420 Order[Idx] = Idx;
1421 }
1422 return calcExtTspScore(Order, NodeSizes, NodeCounts, EdgeCounts);
1423}
1424
1426 const CDSortConfig &Config, ArrayRef<uint64_t> FuncSizes,
1427 ArrayRef<uint64_t> FuncCounts, ArrayRef<EdgeCount> CallCounts,
1428 ArrayRef<uint64_t> CallOffsets) {
1429 // Verify correctness of the input data.
1430 assert(FuncCounts.size() == FuncSizes.size() && "Incorrect input");
1431
1432 // Apply the reordering algorithm.
1433 CDSortImpl Alg(Config, FuncSizes, FuncCounts, CallCounts, CallOffsets);
1434 std::vector<uint64_t> Result = Alg.run();
1435 assert(Result.size() == FuncSizes.size() && "Incorrect size of layout");
1436 return Result;
1437}
1438
1440 ArrayRef<uint64_t> FuncSizes, ArrayRef<uint64_t> FuncCounts,
1441 ArrayRef<EdgeCount> CallCounts, ArrayRef<uint64_t> CallOffsets) {
1443 // Populate the config from the command-line options.
1444 if (CacheEntries.getNumOccurrences() > 0)
1445 Config.CacheEntries = CacheEntries;
1446 if (CacheSize.getNumOccurrences() > 0)
1447 Config.CacheSize = CacheSize;
1448 if (DistancePower.getNumOccurrences() > 0)
1449 Config.DistancePower = DistancePower;
1450 if (FrequencyScale.getNumOccurrences() > 0)
1451 Config.FrequencyScale = FrequencyScale;
1452 return computeCacheDirectedLayout(Config, FuncSizes, FuncCounts, CallCounts,
1453 CallOffsets);
1454}
BlockVerifier::State From
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static cl::opt< double > DistancePower("cds-distance-power", cl::ReallyHidden, cl::desc("The power exponent for the distance-based locality"))
static cl::opt< double > FrequencyScale("cds-frequency-scale", cl::ReallyHidden, cl::desc("The scale factor for the frequency-based locality"))
static cl::opt< unsigned > CacheEntries("cds-cache-entries", cl::ReallyHidden, cl::desc("The size of the cache"))
static cl::opt< bool > EnableChainSplitAlongJumps("ext-tsp-enable-chain-split-along-jumps", cl::ReallyHidden, cl::init(true), cl::desc("The maximum size of a chain to apply splitting"))
static cl::opt< unsigned > ForwardDistance("ext-tsp-forward-distance", cl::ReallyHidden, cl::init(1024), cl::desc("The maximum distance (in bytes) of a forward jump for ExtTSP"))
static cl::opt< unsigned > BackwardDistance("ext-tsp-backward-distance", cl::ReallyHidden, cl::init(640), cl::desc("The maximum distance (in bytes) of a backward jump for ExtTSP"))
static cl::opt< double > BackwardWeightCond("ext-tsp-backward-weight-cond", cl::ReallyHidden, cl::init(0.1), cl::desc("The weight of conditional backward jumps for ExtTSP value"))
static cl::opt< double > ForwardWeightUncond("ext-tsp-forward-weight-uncond", cl::ReallyHidden, cl::init(0.1), cl::desc("The weight of unconditional forward jumps for ExtTSP value"))
static cl::opt< unsigned > ChainSplitThreshold("ext-tsp-chain-split-threshold", cl::ReallyHidden, cl::init(128), cl::desc("The maximum size of a chain to apply splitting"))
static cl::opt< unsigned > MaxChainSize("ext-tsp-max-chain-size", cl::ReallyHidden, cl::init(4096), cl::desc("The maximum size of a chain to create."))
static cl::opt< double > FallthroughWeightUncond("ext-tsp-fallthrough-weight-uncond", cl::ReallyHidden, cl::init(1.05), cl::desc("The weight of unconditional fallthrough jumps for ExtTSP value"))
static cl::opt< unsigned > CacheSize("cds-cache-size", cl::ReallyHidden, cl::desc("The size of a line in the cache"))
static cl::opt< double > BackwardWeightUncond("ext-tsp-backward-weight-uncond", cl::ReallyHidden, cl::init(0.1), cl::desc("The weight of unconditional backward jumps for ExtTSP value"))
static cl::opt< double > ForwardWeightCond("ext-tsp-forward-weight-cond", cl::ReallyHidden, cl::init(0.1), cl::desc("The weight of conditional forward jumps for ExtTSP value"))
static cl::opt< double > FallthroughWeightCond("ext-tsp-fallthrough-weight-cond", cl::ReallyHidden, cl::init(1.0), cl::desc("The weight of conditional fallthrough jumps for ExtTSP value"))
Declares methods and data structures for code layout algorithms.
static void clear(coro::Shape &Shape)
Definition: Coroutines.cpp:150
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
uint64_t Size
std::optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1272
RelaxConfig Config
Definition: ELF_riscv.cpp:495
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define _
static void addEdge(SmallVectorImpl< LazyCallGraph::Edge > &Edges, DenseMap< LazyCallGraph::Node *, int > &EdgeIndexMap, LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK)
static LoopDeletionResult merge(LoopDeletionResult A, LoopDeletionResult B)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, ArrayRef< StringLiteral > StandardNames)
Initialize the set of available library functions based on the specified target triple.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition: ArrayRef.h:174
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
Target - Wrapper for Target specific information.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ ReallyHidden
Definition: CommandLine.h:139
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
std::vector< uint64_t > computeCacheDirectedLayout(ArrayRef< uint64_t > FuncSizes, ArrayRef< uint64_t > FuncCounts, ArrayRef< EdgeCount > CallCounts, ArrayRef< uint64_t > CallOffsets)
Apply a Cache-Directed Sort for functions represented by a call graph.
double calcExtTspScore(ArrayRef< uint64_t > Order, ArrayRef< uint64_t > NodeSizes, ArrayRef< uint64_t > NodeCounts, ArrayRef< EdgeCount > EdgeCounts)
Estimate the "quality" of a given node order in CFG.
std::vector< uint64_t > computeExtTspLayout(ArrayRef< uint64_t > NodeSizes, ArrayRef< uint64_t > NodeCounts, ArrayRef< EdgeCount > EdgeCounts)
Find a layout of nodes (basic blocks) of a given CFG optimizing jump locality and thus processor I-ca...
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:361
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1685
cl::opt< bool > ApplyExtTspWithoutProfile
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void erase_value(Container &C, ValueType V)
Wrapper function to remove a value from a container:
Definition: STLExtras.h:2029
cl::opt< bool > EnableExtTspBlockPlacement
Algorithm-specific params for Cache-Directed Sort.
Definition: CodeLayout.h:63