1 //===- RDFLiveness.cpp ----------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Computation of the liveness information from the data-flow graph.
10 //
11 // The main functionality of this code is to compute block live-in
12 // information. With the live-in information in place, the placement
13 // of kill flags can also be recalculated.
14 //
15 // The block live-in calculation is based on the ideas from the following
16 // publication:
17 //
18 // Dibyendu Das, Ramakrishna Upadrasta, Benoit Dupont de Dinechin.
19 // "Efficient Liveness Computation Using Merge Sets and DJ-Graphs."
20 // ACM Transactions on Architecture and Code Optimization, Association for
21 // Computing Machinery, 2012, ACM TACO Special Issue on "High-Performance
22 // and Embedded Architectures and Compilers", 8 (4),
23 // <10.1145/2086696.2086706>. <hal-00647369>
24 //
25 //===----------------------------------------------------------------------===//
26 #include "llvm/ADT/BitVector.h"
27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/STLExtras.h"
29 #include "llvm/ADT/SetVector.h"
30 #include "llvm/ADT/SmallSet.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineDominanceFrontier.h"
33 #include "llvm/CodeGen/MachineDominators.h"
34 #include "llvm/CodeGen/MachineFunction.h"
35 #include "llvm/CodeGen/MachineInstr.h"
36 #include "llvm/CodeGen/RDFGraph.h"
37 #include "llvm/CodeGen/RDFLiveness.h"
39 #include "llvm/MC/LaneBitmask.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include <algorithm>
45 #include <cassert>
46 #include <cstdint>
47 #include <iterator>
48 #include <map>
49 #include <unordered_map>
50 #include <utility>
51 #include <vector>
52 
53 using namespace llvm;
54 using namespace rdf;
55 
56 static cl::opt<unsigned> MaxRecNest("rdf-liveness-max-rec", cl::init(25),
57  cl::Hidden, cl::desc("Maximum recursion level"));
58 
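// --- Editorial sketch (not part of the original source) ---------------------
// A minimal illustration of the typical driver sequence for this analysis,
// assuming an already built and normalized DataFlowGraph, in the style of the
// RDF-based Hexagon passes. The function name and parameters below are
// hypothetical and are not used elsewhere in this file.
static void recomputeLivenessExample(MachineRegisterInfo &MRI,
                                     DataFlowGraph &DFG) {
  Liveness LV(MRI, DFG);
  LV.computeLiveIns();  // Compute block live-in sets (defined in this file).
  LV.resetLiveIns();    // Rewrite MachineBasicBlock live-in lists.
  LV.resetKills();      // Recompute kill flags from the new liveness.
}
// ----------------------------------------------------------------------------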
59 namespace llvm {
60 namespace rdf {
61 
62  raw_ostream &operator<<(raw_ostream &OS, const Print<Liveness::RefMap> &P) {
63  OS << '{';
64  for (auto &I : P.Obj) {
65  OS << ' ' << printReg(I.first, &P.G.getTRI()) << '{';
66  for (auto J = I.second.begin(), E = I.second.end(); J != E; ) {
67  OS << Print<NodeId>(J->first, P.G) << PrintLaneMaskOpt(J->second);
68  if (++J != E)
69  OS << ',';
70  }
71  OS << '}';
72  }
73  OS << " }";
74  return OS;
75  }
76 
77 } // end namespace rdf
78 } // end namespace llvm
79 
80 // The order in the returned sequence is the order of reaching defs in the
81 // upward traversal: the first def is the closest to the given reference RefA,
82 // the next one is further up, and so on.
83 // The list ends at a reaching phi def, or when the reference from RefA is
84 // covered by the defs in the list (see FullChain).
85 // This function provides two modes of operation:
86 // (1) Returning the sequence of reaching defs for a particular reference
87 // node. This sequence will terminate at the first phi node [1].
88 // (2) Returning a partial sequence of reaching defs, where the final goal
89 // is to traverse past phi nodes to the actual defs arising from the code
90 // itself.
91 // In mode (2), the register reference for which the search was started
92 // may be different from the reference node RefA, for which this call was
93 // made, hence the argument RefRR, which holds the original register.
94 // Also, some definitions may have already been encountered in a previous
95 // call that will influence register covering. The register references
96 // already defined are passed in through DefRRs.
97 // In mode (1), the "continuation" considerations do not apply, and the
98 // RefRR is the same as the register in RefA, and the set DefRRs is empty.
99 //
100 // [1] It is possible for multiple phi nodes to be included in the returned
101 // sequence:
102 // SubA = phi ...
103 // SubB = phi ...
104 // ... = SuperAB(rdef:SubA), SuperAB"(rdef:SubB)
105 // However, these phi nodes are independent from one another in terms of
106 // the data-flow.
107 
108 NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
109  NodeAddr<RefNode*> RefA, bool TopShadows, bool FullChain,
110  const RegisterAggr &DefRRs) {
111  NodeList RDefs; // Return value.
112  SetVector<NodeId> DefQ;
113  DenseMap<MachineInstr*, uint32_t> OrdMap;
114 
115  // Dead defs will be treated as if they were live, since they are actually
116  // on the data-flow path. They cannot be ignored because even though they
117  // do not generate meaningful values, they still modify registers.
118 
119  // If the reference is undefined, there is nothing to do.
120  if (RefA.Addr->getFlags() & NodeAttrs::Undef)
121  return RDefs;
122 
123  // The initial queue should not have reaching defs for shadows. The
124  // whole point of a shadow is that it will have a reaching def that
125  // is not aliased to the reaching defs of the related shadows.
126  NodeId Start = RefA.Id;
127  auto SNA = DFG.addr<RefNode*>(Start);
128  if (NodeId RD = SNA.Addr->getReachingDef())
129  DefQ.insert(RD);
130  if (TopShadows) {
131  for (auto S : DFG.getRelatedRefs(RefA.Addr->getOwner(DFG), RefA))
132  if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
133  DefQ.insert(RD);
134  }
135 
136  // Collect all the reaching defs, going up until a phi node is encountered,
137  // or there are no more reaching defs. From this set, the actual set of
138  // reaching defs will be selected.
139  // The traversal upwards must go on until a covering def is encountered.
140  // It is possible that a collection of non-covering (individually) defs
141  // will be sufficient, but keep going until a covering one is found.
142  for (unsigned i = 0; i < DefQ.size(); ++i) {
143  auto TA = DFG.addr<DefNode*>(DefQ[i]);
144  if (TA.Addr->getFlags() & NodeAttrs::PhiRef)
145  continue;
146  // Stop at the covering/overwriting def of the initial register reference.
147  RegisterRef RR = TA.Addr->getRegRef(DFG);
148  if (!DFG.IsPreservingDef(TA))
149  if (RegisterAggr::isCoverOf(RR, RefRR, PRI))
150  continue;
151  // Get the next level of reaching defs. This will include multiple
152  // reaching defs for shadows.
153  for (auto S : DFG.getRelatedRefs(TA.Addr->getOwner(DFG), TA))
154  if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
155  DefQ.insert(RD);
156  // Don't visit sibling defs. They share the same reaching def (which
157  // will be visited anyway), but they define something not aliased to
158  // this ref.
159  }
160 
161  // Return the MachineBasicBlock containing a given instruction.
162  auto Block = [this] (NodeAddr<InstrNode*> IA) -> MachineBasicBlock* {
163  if (IA.Addr->getKind() == NodeAttrs::Stmt)
164  return NodeAddr<StmtNode*>(IA).Addr->getCode()->getParent();
165  assert(IA.Addr->getKind() == NodeAttrs::Phi);
166  NodeAddr<PhiNode*> PA = IA;
167  NodeAddr<BlockNode*> BA = PA.Addr->getOwner(DFG);
168  return BA.Addr->getCode();
169  };
170 
171  SmallSet<NodeId,32> Defs;
172 
173  // Remove all non-phi defs that are not aliased to RefRR, and separate
174  // the remaining defs into buckets for containing blocks.
175  std::map<NodeId, NodeAddr<InstrNode*>> Owners;
176  std::map<MachineBasicBlock*, SmallVector<NodeId,32>> Blocks;
177  for (NodeId N : DefQ) {
178  auto TA = DFG.addr<DefNode*>(N);
179  bool IsPhi = TA.Addr->getFlags() & NodeAttrs::PhiRef;
180  if (!IsPhi && !PRI.alias(RefRR, TA.Addr->getRegRef(DFG)))
181  continue;
182  Defs.insert(TA.Id);
183  NodeAddr<InstrNode*> IA = TA.Addr->getOwner(DFG);
184  Owners[TA.Id] = IA;
185  Blocks[Block(IA)].push_back(IA.Id);
186  }
187 
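 // Compare two instruction nodes from the same basic block: statements are
 // ordered by their position in the block (using OrdMap when it has been
 // populated by GetOrder below), phi nodes precede all statements, and ties
 // between phis are broken by node id.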
188  auto Precedes = [this,&OrdMap] (NodeId A, NodeId B) {
189  if (A == B)
190  return false;
191  NodeAddr<InstrNode*> OA = DFG.addr<InstrNode*>(A);
192  NodeAddr<InstrNode*> OB = DFG.addr<InstrNode*>(B);
193  bool StmtA = OA.Addr->getKind() == NodeAttrs::Stmt;
194  bool StmtB = OB.Addr->getKind() == NodeAttrs::Stmt;
195  if (StmtA && StmtB) {
196  const MachineInstr *InA = NodeAddr<StmtNode*>(OA).Addr->getCode();
197  const MachineInstr *InB = NodeAddr<StmtNode*>(OB).Addr->getCode();
198  assert(InA->getParent() == InB->getParent());
199  auto FA = OrdMap.find(InA);
200  if (FA != OrdMap.end())
201  return FA->second < OrdMap.find(InB)->second;
202  const MachineBasicBlock *BB = InA->getParent();
203  for (auto It = BB->begin(), E = BB->end(); It != E; ++It) {
204  if (It == InA->getIterator())
205  return true;
206  if (It == InB->getIterator())
207  return false;
208  }
209  llvm_unreachable("InA and InB should be in the same block");
210  }
211  // One of them is a phi node.
212  if (!StmtA && !StmtB) {
213  // Both are phis, which are unordered. Break the tie by id numbers.
214  return A < B;
215  }
216  // Only one of them is a phi. Phis always precede statements.
217  return !StmtA;
218  };
219 
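 // Number the instructions in B so that subsequent Precedes() queries become
 // cheap map lookups instead of linear scans over the block.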
220  auto GetOrder = [&OrdMap] (MachineBasicBlock &B) {
221  uint32_t Pos = 0;
222  for (MachineInstr &In : B)
223  OrdMap.insert({&In, ++Pos});
224  };
225 
226  // For each block, sort the nodes in it.
227  std::vector<MachineBasicBlock*> TmpBB;
228  for (auto &Bucket : Blocks) {
229  TmpBB.push_back(Bucket.first);
230  if (Bucket.second.size() > 2)
231  GetOrder(*Bucket.first);
232  llvm::sort(Bucket.second, Precedes);
233  }
234 
235  // Sort the blocks with respect to dominance.
236  llvm::sort(TmpBB,
237  [this](auto A, auto B) { return MDT.properlyDominates(A, B); });
238 
239  std::vector<NodeId> TmpInst;
240  for (MachineBasicBlock *MBB : llvm::reverse(TmpBB)) {
241  auto &Bucket = Blocks[MBB];
242  TmpInst.insert(TmpInst.end(), Bucket.rbegin(), Bucket.rend());
243  }
244 
245  // The vector is a list of instructions, so that defs coming from
246  // the same instruction don't need to be artificially ordered.
247  // Then, when computing the initial segment, and iterating over an
248  // instruction, pick the defs that contribute to the covering (i.e. are
249  // not covered by previously added defs). Check the defs individually,
250  // i.e. first check whether each def is covered or not (without adding them
251  // to the tracking set), and then add all the selected ones.
252 
253  // The reason for this is this example:
254  // *d1<A>, *d2<B>, ... Assume A and B are aliased (can happen in phi nodes).
255  // *d3<C> If A \incl BuC, and B \incl AuC, then *d2 would be
256  // covered if we added A first, and A would be covered
257  // if we added B first.
258  // In this example we want both A and B, because we don't want to give
259  // either one priority over the other, since they belong to the same
260  // statement.
261 
262  RegisterAggr RRs(DefRRs);
263 
264  auto DefInSet = [&Defs] (NodeAddr<RefNode*> TA) -> bool {
265  return TA.Addr->getKind() == NodeAttrs::Def &&
266  Defs.count(TA.Id);
267  };
268 
269  for (NodeId T : TmpInst) {
270  if (!FullChain && RRs.hasCoverOf(RefRR))
271  break;
272  auto TA = DFG.addr<InstrNode*>(T);
273  bool IsPhi = DFG.IsCode<NodeAttrs::Phi>(TA);
274  NodeList Ds;
275  for (NodeAddr<DefNode*> DA : TA.Addr->members_if(DefInSet, DFG)) {
276  RegisterRef QR = DA.Addr->getRegRef(DFG);
277  // Add phi defs even if they are covered by subsequent defs. This is
278  // for cases where the reached use is not covered by any of the defs
279  // encountered so far: the phi def is needed to expose the liveness
280  // of that use to the entry of the block.
281  // Example:
282  // phi d1<R3>(,d2,), ... Phi def d1 is covered by d2.
283  // d2<R3>(d1,,u3), ...
284  // ..., u3<D1>(d2) This use needs to be live on entry.
285  if (FullChain || IsPhi || !RRs.hasCoverOf(QR))
286  Ds.push_back(DA);
287  }
288  llvm::append_range(RDefs, Ds);
289  for (NodeAddr<DefNode*> DA : Ds) {
290  // When collecting a full chain of definitions, do not consider phi
291  // defs to actually define a register.
292  uint16_t Flags = DA.Addr->getFlags();
293  if (!FullChain || !(Flags & NodeAttrs::PhiRef))
294  if (!(Flags & NodeAttrs::Preserving)) // Don't care about Undef here.
295  RRs.insert(DA.Addr->getRegRef(DFG));
296  }
297  }
298 
299  auto DeadP = [](const NodeAddr<DefNode*> DA) -> bool {
300  return DA.Addr->getFlags() & NodeAttrs::Dead;
301  };
302  llvm::erase_if(RDefs, DeadP);
303 
304  return RDefs;
305 }
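// --- Editorial sketch (not part of the original source) ---------------------
// Mode (1) from the comment preceding getAllReachingDefs: collect the defs
// reaching a single use, stopping at the first phi node, with no previously
// accumulated defs. The helper name is hypothetical; it relies only on the
// public Liveness interface.
static NodeList reachingDefsStopAtPhisExample(Liveness &LV,
                                              const DataFlowGraph &DFG,
                                              NodeAddr<UseNode *> UA) {
  RegisterRef RR = UA.Addr->getRegRef(DFG);
  return LV.getAllReachingDefs(RR, UA, /*TopShadows=*/false,
                               /*FullChain=*/false,
                               RegisterAggr(DFG.getPRI()));
}
// ----------------------------------------------------------------------------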
306 
307 std::pair<NodeSet,bool>
308 Liveness::getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
309  NodeSet &Visited, const NodeSet &Defs) {
310  return getAllReachingDefsRecImpl(RefRR, RefA, Visited, Defs, 0, MaxRecNest);
311 }
312 
313 std::pair<NodeSet,bool>
314 Liveness::getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
315  NodeSet &Visited, const NodeSet &Defs, unsigned Nest, unsigned MaxNest) {
316  if (Nest > MaxNest)
317  return { NodeSet(), false };
318  // Collect all defined registers. Do not consider phis to be defining
319  // anything, only collect "real" definitions.
320  RegisterAggr DefRRs(PRI);
321  for (NodeId D : Defs) {
322  const auto DA = DFG.addr<const DefNode*>(D);
323  if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
324  DefRRs.insert(DA.Addr->getRegRef(DFG));
325  }
326 
327  NodeList RDs = getAllReachingDefs(RefRR, RefA, false, true, DefRRs);
328  if (RDs.empty())
329  return { Defs, true };
330 
331  // Make a copy of the preexisting definitions and add the newly found ones.
332  NodeSet TmpDefs = Defs;
333  for (NodeAddr<NodeBase*> R : RDs)
334  TmpDefs.insert(R.Id);
335 
336  NodeSet Result = Defs;
337 
338  for (NodeAddr<DefNode*> DA : RDs) {
339  Result.insert(DA.Id);
340  if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
341  continue;
342  NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG);
343  if (Visited.count(PA.Id))
344  continue;
345  Visited.insert(PA.Id);
346  // Go over all phi uses and get the reaching defs for each use.
347  for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
348  const auto &T = getAllReachingDefsRecImpl(RefRR, U, Visited, TmpDefs,
349  Nest+1, MaxNest);
350  if (!T.second)
351  return { T.first, false };
352  Result.insert(T.first.begin(), T.first.end());
353  }
354  }
355 
356  return { Result, true };
357 }
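// --- Editorial sketch (not part of the original source) ---------------------
// Typical use of the recursive variant: gather every def that can reach the
// reference UA, including defs found by looking through phi nodes, subject to
// the MaxRecNest bound. The helper name is hypothetical.
static NodeSet allReachingDefsThroughPhisExample(Liveness &LV,
                                                 const DataFlowGraph &DFG,
                                                 NodeAddr<RefNode *> UA) {
  NodeSet Visited, Defs;
  RegisterRef RR = UA.Addr->getRegRef(DFG);
  std::pair<NodeSet, bool> Res =
      LV.getAllReachingDefsRec(RR, UA, Visited, Defs);
  // Res.second is false if the bounded recursion gave up before completing.
  return Res.first;
}
// ----------------------------------------------------------------------------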
358 
359 /// Find the nearest ref node aliased to RefRR, going upwards in the data
360 /// flow, starting from the instruction immediately preceding Inst.
361 NodeAddr<RefNode*> Liveness::getNearestAliasedRef(RegisterRef RefRR,
362  NodeAddr<InstrNode*> IA) {
363  NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
364  NodeList Ins = BA.Addr->members(DFG);
365  NodeId FindId = IA.Id;
366  auto E = Ins.rend();
367  auto B = std::find_if(Ins.rbegin(), E,
368  [FindId] (const NodeAddr<InstrNode*> T) {
369  return T.Id == FindId;
370  });
371  // Do not scan IA (which is what B would point to).
372  if (B != E)
373  ++B;
374 
375  do {
376  // Process the range of instructions from B to E.
377  for (NodeAddr<InstrNode*> I : make_range(B, E)) {
378  NodeList Refs = I.Addr->members(DFG);
379  NodeAddr<RefNode*> Clob, Use;
380  // Scan all the refs in I aliased to RefRR, and return the one that
381  // is the closest to the output of I, i.e. def > clobber > use.
382  for (NodeAddr<RefNode*> R : Refs) {
383  if (!PRI.alias(R.Addr->getRegRef(DFG), RefRR))
384  continue;
385  if (DFG.IsDef(R)) {
386  // If it's a non-clobbering def, just return it.
387  if (!(R.Addr->getFlags() & NodeAttrs::Clobbering))
388  return R;
389  Clob = R;
390  } else {
391  Use = R;
392  }
393  }
394  if (Clob.Id != 0)
395  return Clob;
396  if (Use.Id != 0)
397  return Use;
398  }
399 
400  // Go up to the immediate dominator, if any.
401  MachineBasicBlock *BB = BA.Addr->getCode();
402  BA = NodeAddr<BlockNode*>();
403  if (MachineDomTreeNode *N = MDT.getNode(BB)) {
404  if ((N = N->getIDom()))
405  BA = DFG.findBlock(N->getBlock());
406  }
407  if (!BA.Id)
408  break;
409 
410  Ins = BA.Addr->members(DFG);
411  B = Ins.rbegin();
412  E = Ins.rend();
413  } while (true);
414 
415  return NodeAddr<RefNode*>();
416 }
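// --- Editorial sketch (not part of the original source) ---------------------
// Example of getNearestAliasedRef: starting just above instruction IA, walk
// this block and then the immediate dominators for the closest def or use
// aliased to RR; a null (id 0) NodeAddr means no such ref exists upwards.
// The helper name is hypothetical.
static bool hasAliasedRefAboveExample(Liveness &LV, RegisterRef RR,
                                      NodeAddr<InstrNode *> IA) {
  NodeAddr<RefNode *> R = LV.getNearestAliasedRef(RR, IA);
  return R.Id != 0;
}
// ----------------------------------------------------------------------------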
417 
418 NodeSet Liveness::getAllReachedUses(RegisterRef RefRR,
419  NodeAddr<DefNode*> DefA, const RegisterAggr &DefRRs) {
420  NodeSet Uses;
421 
422  // If the original register is already covered by all the intervening
423  // defs, no more uses can be reached.
424  if (DefRRs.hasCoverOf(RefRR))
425  return Uses;
426 
427  // Add all directly reached uses.
428  // If the def is dead, it does not provide a value for any use.
429  bool IsDead = DefA.Addr->getFlags() & NodeAttrs::Dead;
430  NodeId U = !IsDead ? DefA.Addr->getReachedUse() : 0;
431  while (U != 0) {
432  auto UA = DFG.addr<UseNode*>(U);
433  if (!(UA.Addr->getFlags() & NodeAttrs::Undef)) {
434  RegisterRef UR = UA.Addr->getRegRef(DFG);
435  if (PRI.alias(RefRR, UR) && !DefRRs.hasCoverOf(UR))
436  Uses.insert(U);
437  }
438  U = UA.Addr->getSibling();
439  }
440 
441  // Traverse all reached defs. This time dead defs cannot be ignored.
442  for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
443  auto DA = DFG.addr<DefNode*>(D);
444  NextD = DA.Addr->getSibling();
445  RegisterRef DR = DA.Addr->getRegRef(DFG);
446  // If this def is already covered, it cannot reach anything new.
447  // Similarly, skip it if it is not aliased to the interesting register.
448  if (DefRRs.hasCoverOf(DR) || !PRI.alias(RefRR, DR))
449  continue;
450  NodeSet T;
451  if (DFG.IsPreservingDef(DA)) {
452  // If it is a preserving def, do not update the set of intervening defs.
453  T = getAllReachedUses(RefRR, DA, DefRRs);
454  } else {
455  RegisterAggr NewDefRRs = DefRRs;
456  NewDefRRs.insert(DR);
457  T = getAllReachedUses(RefRR, DA, NewDefRRs);
458  }
459  Uses.insert(T.begin(), T.end());
460  }
461  return Uses;
462 }
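// --- Editorial sketch (not part of the original source) ---------------------
// Example of getAllReachedUses: collect the ids of all non-undef uses of the
// register defined by DA that the def can reach, assuming no intervening defs
// have been accumulated yet. The helper name is hypothetical.
static NodeSet usesReachedByDefExample(Liveness &LV, const DataFlowGraph &DFG,
                                       NodeAddr<DefNode *> DA) {
  RegisterRef RR = DA.Addr->getRegRef(DFG);
  return LV.getAllReachedUses(RR, DA, RegisterAggr(DFG.getPRI()));
}
// ----------------------------------------------------------------------------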
463 
464 void Liveness::computePhiInfo() {
465  RealUseMap.clear();
466 
467  NodeList Phis;
468  NodeAddr<FuncNode*> FA = DFG.getFunc();
469  NodeList Blocks = FA.Addr->members(DFG);
470  for (NodeAddr<BlockNode*> BA : Blocks) {
471  auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
472  llvm::append_range(Phis, Ps);
473  }
474 
475  // phi use -> (map: reaching phi -> set of registers defined in between)
476  std::map<NodeId,std::map<NodeId,RegisterAggr>> PhiUp;
477  std::vector<NodeId> PhiUQ; // Work list of phis for upward propagation.
478  std::unordered_map<NodeId,RegisterAggr> PhiDRs; // Phi -> registers defined by it.
479 
480  // Go over all phis.
481  for (NodeAddr<PhiNode*> PhiA : Phis) {
482  // Go over all defs and collect the reached uses that are non-phi uses
483  // (i.e. the "real uses").
484  RefMap &RealUses = RealUseMap[PhiA.Id];
485  NodeList PhiRefs = PhiA.Addr->members(DFG);
486 
487  // Have a work queue of defs whose reached uses need to be found.
488  // For each def, add to the queue all reached (non-phi) defs.
489  SetVector<NodeId> DefQ;
490  NodeSet PhiDefs;
491  RegisterAggr DRs(PRI);
492  for (NodeAddr<RefNode*> R : PhiRefs) {
493  if (!DFG.IsRef<NodeAttrs::Def>(R))
494  continue;
495  DRs.insert(R.Addr->getRegRef(DFG));
496  DefQ.insert(R.Id);
497  PhiDefs.insert(R.Id);
498  }
499  PhiDRs.insert(std::make_pair(PhiA.Id, DRs));
500 
501  // Collect the super-set of all possible reached uses. This set will
502  // contain all uses reached from this phi, either directly from the
503  // phi defs, or (recursively) via non-phi defs reached by the phi defs.
504  // This set of uses will later be trimmed to only contain the uses that
505  // are actually reached by the phi defs.
506  for (unsigned i = 0; i < DefQ.size(); ++i) {
507  NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]);
508  // Visit all reached uses. Phi defs should not really have the "dead"
509  // flag set, but check it anyway for consistency.
510  bool IsDead = DA.Addr->getFlags() & NodeAttrs::Dead;
511  NodeId UN = !IsDead ? DA.Addr->getReachedUse() : 0;
512  while (UN != 0) {
513  NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN);
514  uint16_t F = A.Addr->getFlags();
515  if ((F & (NodeAttrs::Undef | NodeAttrs::PhiRef)) == 0) {
516  RegisterRef R = A.Addr->getRegRef(DFG);
517  RealUses[R.Reg].insert({A.Id,R.Mask});
518  }
519  UN = A.Addr->getSibling();
520  }
521  // Visit all reached defs, and add them to the queue. These defs may
522  // override some of the uses collected here, but that will be handled
523  // later.
524  NodeId DN = DA.Addr->getReachedDef();
525  while (DN != 0) {
526  NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN);
527  for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) {
528  uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags();
529  // Must traverse the reached-def chain. Consider:
530  // def(D0) -> def(R0) -> def(R0) -> use(D0)
531  // The reachable use of D0 passes through a def of R0.
532  if (!(Flags & NodeAttrs::PhiRef))
533  DefQ.insert(T.Id);
534  }
535  DN = A.Addr->getSibling();
536  }
537  }
538  // Filter out those uses that appear to be reachable, but really
539  // are not. For example:
540  //
541  // R1:0 = d1
542  // = R1:0 u2 Reached by d1.
543  // R0 = d3
544  // = R1:0 u4 Still reached by d1: indirectly through
545  // the def d3.
546  // R1 = d5
547  // = R1:0 u6 Not reached by d1 (covered collectively
548  // by d3 and d5), but following reached
549  // defs and uses from d1 will lead here.
550  for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
551  // For each reached register UI->first, there is a set UI->second, of
552  // uses of it. For each such use, check if it is reached by this phi,
553  // i.e. check if the set of its reaching uses intersects the set of
554  // this phi's defs.
555  NodeRefSet Uses = UI->second;
556  UI->second.clear();
557  for (std::pair<NodeId,LaneBitmask> I : Uses) {
558  auto UA = DFG.addr<UseNode*>(I.first);
559  // Undef flag is checked above.
560  assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
561  RegisterRef R(UI->first, I.second);
562  // Calculate the exposed part of the reached use.
563  RegisterAggr Covered(PRI);
564  for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
565  if (PhiDefs.count(DA.Id))
566  break;
567  Covered.insert(DA.Addr->getRegRef(DFG));
568  }
569  if (RegisterRef RC = Covered.clearIn(R)) {
570  // We are updating the map for register UI->first, so we need
571  // to map RC to be expressed in terms of that register.
572  RegisterRef S = PRI.mapTo(RC, UI->first);
573  UI->second.insert({I.first, S.Mask});
574  }
575  }
576  UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
577  }
578 
579  // If this phi reaches some "real" uses, add it to the queue for upward
580  // propagation.
581  if (!RealUses.empty())
582  PhiUQ.push_back(PhiA.Id);
583 
584  // Go over all phi uses and check if the reaching def is another phi.
585  // Collect the phis that are among the reaching defs of these uses.
586  // While traversing the list of reaching defs for each phi use, accumulate
587  // the set of registers defined between this phi (PhiA) and the owner phi
588  // of the reaching def.
589  NodeSet SeenUses;
590 
591  for (auto I : PhiRefs) {
592  if (!DFG.IsRef<NodeAttrs::Use>(I) || SeenUses.count(I.Id))
593  continue;
594  NodeAddr<PhiUseNode*> PUA = I;
595  if (PUA.Addr->getReachingDef() == 0)
596  continue;
597 
598  RegisterRef UR = PUA.Addr->getRegRef(DFG);
599  NodeList Ds = getAllReachingDefs(UR, PUA, true, false, NoRegs);
600  RegisterAggr DefRRs(PRI);
601 
602  for (NodeAddr<DefNode*> D : Ds) {
603  if (D.Addr->getFlags() & NodeAttrs::PhiRef) {
604  NodeId RP = D.Addr->getOwner(DFG).Id;
605  std::map<NodeId,RegisterAggr> &M = PhiUp[PUA.Id];
606  auto F = M.find(RP);
607  if (F == M.end())
608  M.insert(std::make_pair(RP, DefRRs));
609  else
610  F->second.insert(DefRRs);
611  }
612  DefRRs.insert(D.Addr->getRegRef(DFG));
613  }
614 
615  for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PhiA, PUA))
616  SeenUses.insert(T.Id);
617  }
618  }
619 
620  if (Trace) {
621  dbgs() << "Phi-up-to-phi map with intervening defs:\n";
622  for (auto I : PhiUp) {
623  dbgs() << "phi " << Print<NodeId>(I.first, DFG) << " -> {";
624  for (auto R : I.second)
625  dbgs() << ' ' << Print<NodeId>(R.first, DFG)
626  << Print<RegisterAggr>(R.second, DFG);
627  dbgs() << " }\n";
628  }
629  }
630 
631  // Propagate the reached registers up in the phi chain.
632  //
633  // The following type of situation needs careful handling:
634  //
635  // phi d1<R1:0> (1)
636  // |
637  // ... d2<R1>
638  // |
639  // phi u3<R1:0> (2)
640  // |
641  // ... u4<R1>
642  //
643  // The phi node (2) defines a register pair R1:0, and reaches a "real"
644  // use u4 of just R1. The same phi node is also known to reach (upwards)
645  // the phi node (1). However, the use u4 is not reached by phi (1),
646  // because of the intervening definition d2 of R1. The data flow between
647  // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0.
648  //
649  // When propagating uses up the phi chains, get the all reaching defs
650  // for a given phi use, and traverse the list until the propagated ref
651  // is covered, or until reaching the final phi. Only assume that the
652  // reference reaches the phi in the latter case.
653 
654  // The operation "clearIn" can be expensive. For a given set of intervening
655  // defs, cache the result of subtracting these defs from a given register
656  // ref.
657  using SubMap = std::unordered_map<RegisterRef, RegisterRef>;
658  std::unordered_map<RegisterAggr, SubMap> Subs;
659  auto ClearIn = [] (RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) {
660  if (Mid.empty())
661  return RR;
662  auto F = SM.find(RR);
663  if (F != SM.end())
664  return F->second;
665  RegisterRef S = Mid.clearIn(RR);
666  SM.insert({RR, S});
667  return S;
668  };
669 
670  // Go over all phis.
671  for (unsigned i = 0; i < PhiUQ.size(); ++i) {
672  auto PA = DFG.addr<PhiNode*>(PhiUQ[i]);
673  NodeList PUs = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG);
674  RefMap &RUM = RealUseMap[PA.Id];
675 
676  for (NodeAddr<UseNode*> UA : PUs) {
677  std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id];
678  RegisterRef UR = UA.Addr->getRegRef(DFG);
679  for (const std::pair<const NodeId, RegisterAggr> &P : PUM) {
680  bool Changed = false;
681  const RegisterAggr &MidDefs = P.second;
682  // Collect the set PropUp of uses that are reached by the current
683  // phi PA, and are not covered by any intervening def between the
684  // currently visited use UA and the upward phi P.
685 
686  if (MidDefs.hasCoverOf(UR))
687  continue;
688  SubMap &SM = Subs[MidDefs];
689 
690  // General algorithm:
691  // for each (R,U) : U is use node of R, U is reached by PA
692  // if MidDefs does not cover (R,U)
693  // then add (R-MidDefs,U) to RealUseMap[P]
694  //
695  for (const std::pair<const RegisterId, NodeRefSet> &T : RUM) {
696  RegisterRef R(T.first);
697  // The current phi (PA) could be a phi for a regmask. It could
698  // reach a whole variety of uses that are not related to the
699  // specific upward phi (P.first).
700  const RegisterAggr &DRs = PhiDRs.at(P.first);
701  if (!DRs.hasAliasOf(R))
702  continue;
703  R = PRI.mapTo(DRs.intersectWith(R), T.first);
704  for (std::pair<NodeId,LaneBitmask> V : T.second) {
705  LaneBitmask M = R.Mask & V.second;
706  if (M.none())
707  continue;
708  if (RegisterRef SS = ClearIn(RegisterRef(R.Reg, M), MidDefs, SM)) {
709  NodeRefSet &RS = RealUseMap[P.first][SS.Reg];
710  Changed |= RS.insert({V.first,SS.Mask}).second;
711  }
712  }
713  }
714 
715  if (Changed)
716  PhiUQ.push_back(P.first);
717  }
718  }
719  }
720 
721  if (Trace) {
722  dbgs() << "Real use map:\n";
723  for (auto I : RealUseMap) {
724  dbgs() << "phi " << Print<NodeId>(I.first, DFG);
725  NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first);
726  NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG);
727  if (!Ds.empty()) {
728  RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG);
729  dbgs() << '<' << Print<RegisterRef>(RR, DFG) << '>';
730  } else {
731  dbgs() << "<noreg>";
732  }
733  dbgs() << " -> " << Print<RefMap>(I.second, DFG) << '\n';
734  }
735  }
736 }
737 
738 void Liveness::computeLiveIns() {
739  // Populate the node-to-block map. This speeds up the calculations
740  // significantly.
741  NBMap.clear();
742  for (NodeAddr<BlockNode*> BA : DFG.getFunc().Addr->members(DFG)) {
743  MachineBasicBlock *BB = BA.Addr->getCode();
744  for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
745  for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG))
746  NBMap.insert(std::make_pair(RA.Id, BB));
747  NBMap.insert(std::make_pair(IA.Id, BB));
748  }
749  }
750 
751  MachineFunction &MF = DFG.getMF();
752 
753  // Compute IDF first, then the inverse.
754  decltype(IIDF) IDF;
755  for (MachineBasicBlock &B : MF) {
756  auto F1 = MDF.find(&B);
757  if (F1 == MDF.end())
758  continue;
759  SetVector<MachineBasicBlock*> IDFB(F1->second.begin(), F1->second.end());
760  for (unsigned i = 0; i < IDFB.size(); ++i) {
761  auto F2 = MDF.find(IDFB[i]);
762  if (F2 != MDF.end())
763  IDFB.insert(F2->second.begin(), F2->second.end());
764  }
765  // Add B to the IDF(B). This will put B in the IIDF(B).
766  IDFB.insert(&B);
767  IDF[&B].insert(IDFB.begin(), IDFB.end());
768  }
769 
770  for (auto I : IDF)
771  for (auto S : I.second)
772  IIDF[S].insert(I.first);
773 
774  computePhiInfo();
775 
776  NodeAddr<FuncNode*> FA = DFG.getFunc();
777  NodeList Blocks = FA.Addr->members(DFG);
778 
779  // Build the phi live-on-entry map.
780  for (NodeAddr<BlockNode*> BA : Blocks) {
781  MachineBasicBlock *MB = BA.Addr->getCode();
782  RefMap &LON = PhiLON[MB];
783  for (auto P : BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG))
784  for (const RefMap::value_type &S : RealUseMap[P.Id])
785  LON[S.first].insert(S.second.begin(), S.second.end());
786  }
787 
788  if (Trace) {
789  dbgs() << "Phi live-on-entry map:\n";
790  for (auto &I : PhiLON)
791  dbgs() << "block #" << I.first->getNumber() << " -> "
792  << Print<RefMap>(I.second, DFG) << '\n';
793  }
794 
795  // Build the phi live-on-exit map. Each phi node has some set of reached
796  // "real" uses. Propagate this set backwards into the block predecessors
797  // through the reaching defs of the corresponding phi uses.
798  for (NodeAddr<BlockNode*> BA : Blocks) {
799  NodeList Phis = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
800  for (NodeAddr<PhiNode*> PA : Phis) {
801  RefMap &RUs = RealUseMap[PA.Id];
802  if (RUs.empty())
803  continue;
804 
805  NodeSet SeenUses;
806  for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
807  if (!SeenUses.insert(U.Id).second)
808  continue;
809  NodeAddr<PhiUseNode*> PUA = U;
810  if (PUA.Addr->getReachingDef() == 0)
811  continue;
812 
813  // Each phi has some set (possibly empty) of reached "real" uses,
814  // that is, uses that are part of the compiled program. Such a use
815  // may be located in some farther block, but following a chain of
816  // reaching defs will eventually lead to this phi.
817  // Any chain of reaching defs may fork at a phi node, but there
818  // will be a path upwards that will lead to this phi. Now, this
819  // chain will need to fork at this phi, since some of the reached
820  // uses may have definitions joining in from multiple predecessors.
821  // For each reached "real" use, identify the set of reaching defs
822  // coming from each predecessor P, and add them to PhiLOX[P].
823  //
824  auto PrA = DFG.addr<BlockNode*>(PUA.Addr->getPredecessor());
825  RefMap &LOX = PhiLOX[PrA.Addr->getCode()];
826 
827  for (const std::pair<const RegisterId, NodeRefSet> &RS : RUs) {
828  // We need to visit each individual use.
829  for (std::pair<NodeId,LaneBitmask> P : RS.second) {
830  // Create a register ref corresponding to the use, and find
831  // all reaching defs starting from the phi use, and treating
832  // all related shadows as a single use cluster.
833  RegisterRef S(RS.first, P.second);
834  NodeList Ds = getAllReachingDefs(S, PUA, true, false, NoRegs);
835  for (NodeAddr<DefNode*> D : Ds) {
836  // Calculate the mask corresponding to the visited def.
837  RegisterAggr TA(PRI);
838  TA.insert(D.Addr->getRegRef(DFG)).intersect(S);
839  LaneBitmask TM = TA.makeRegRef().Mask;
840  LOX[S.Reg].insert({D.Id, TM});
841  }
842  }
843  }
844 
845  for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PA, PUA))
846  SeenUses.insert(T.Id);
847  } // for U : phi uses
848  } // for P : Phis
849  } // for B : Blocks
850 
851  if (Trace) {
852  dbgs() << "Phi live-on-exit map:\n";
853  for (auto &I : PhiLOX)
854  dbgs() << "block #" << I.first->getNumber() << " -> "
855  << Print<RefMap>(I.second, DFG) << '\n';
856  }
857 
858  RefMap LiveIn;
859  traverse(&MF.front(), LiveIn);
860 
861  // Add function live-ins to the live-in set of the function entry block.
862  LiveMap[&MF.front()].insert(DFG.getLiveIns());
863 
864  if (Trace) {
865  // Dump the liveness map
866  for (MachineBasicBlock &B : MF) {
867  std::vector<RegisterRef> LV;
868  for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
869  LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask));
870  llvm::sort(LV);
871  dbgs() << printMBBReference(B) << "\t rec = {";
872  for (auto I : LV)
873  dbgs() << ' ' << Print<RegisterRef>(I, DFG);
874  dbgs() << " }\n";
875  //dbgs() << "\tcomp = " << Print<RegisterAggr>(LiveMap[&B], DFG) << '\n';
876 
877  LV.clear();
878  const RegisterAggr &LG = LiveMap[&B];
879  for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
880  LV.push_back(*I);
881  llvm::sort(LV);
882  dbgs() << "\tcomp = {";
883  for (auto I : LV)
884  dbgs() << ' ' << Print<RegisterRef>(I, DFG);
885  dbgs() << " }\n";
886 
887  }
888  }
889 }
890 
891 void Liveness::resetLiveIns() {
892  for (auto &B : DFG.getMF()) {
893  // Remove all live-ins.
894  std::vector<unsigned> T;
895  for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
896  T.push_back(LI.PhysReg);
897  for (auto I : T)
898  B.removeLiveIn(I);
899  // Add the newly computed live-ins.
900  const RegisterAggr &LiveIns = LiveMap[&B];
901  for (const RegisterRef R : make_range(LiveIns.rr_begin(), LiveIns.rr_end()))
902  B.addLiveIn({MCPhysReg(R.Reg), R.Mask});
903  }
904 }
905 
906 void Liveness::resetKills() {
907  for (auto &B : DFG.getMF())
908  resetKills(&B);
909 }
910 
911 void Liveness::resetKills(MachineBasicBlock *B) {
912  auto CopyLiveIns = [this] (MachineBasicBlock *B, BitVector &LV) -> void {
913  for (auto I : B->liveins()) {
914  MCSubRegIndexIterator S(I.PhysReg, &TRI);
915  if (!S.isValid()) {
916  LV.set(I.PhysReg);
917  continue;
918  }
919  do {
920  LaneBitmask M = TRI.getSubRegIndexLaneMask(S.getSubRegIndex());
921  if ((M & I.LaneMask).any())
922  LV.set(S.getSubReg());
923  ++S;
924  } while (S.isValid());
925  }
926  };
927 
928  BitVector LiveIn(TRI.getNumRegs()), Live(TRI.getNumRegs());
929  CopyLiveIns(B, LiveIn);
930  for (auto SI : B->successors())
931  CopyLiveIns(SI, Live);
932 
933  for (MachineInstr &MI : llvm::reverse(*B)) {
934  if (MI.isDebugInstr())
935  continue;
936 
937  MI.clearKillInfo();
938  for (auto &Op : MI.operands()) {
939  // An implicit def of a super-register may not necessarily start a
940  // live range of it, since an implicit use could be used to keep parts
941  // of it live. Instead of analyzing the implicit operands, ignore
942  // implicit defs.
943  if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
944  continue;
945  Register R = Op.getReg();
946  if (!Register::isPhysicalRegister(R))
947  continue;
948  for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
949  Live.reset(*SR);
950  }
951  for (auto &Op : MI.operands()) {
952  if (!Op.isReg() || !Op.isUse() || Op.isUndef())
953  continue;
954  Register R = Op.getReg();
955  if (!Register::isPhysicalRegister(R))
956  continue;
957  bool IsLive = false;
958  for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
959  if (!Live[*AR])
960  continue;
961  IsLive = true;
962  break;
963  }
964  if (!IsLive)
965  Op.setIsKill(true);
966  for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
967  Live.set(*SR);
968  }
969  }
970 }
971 
972 // Helper function to obtain the basic block containing the reaching def
973 // of the given use.
974 MachineBasicBlock *Liveness::getBlockWithRef(NodeId RN) const {
975  auto F = NBMap.find(RN);
976  if (F != NBMap.end())
977  return F->second;
978  llvm_unreachable("Node id not in map");
979 }
980 
981 void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
982  // The LiveIn map, for each (physical) register, contains the set of live
983  // reaching defs of that register that are live on entry to the associated
984  // block.
985 
986  // The summary of the traversal algorithm:
987  //
988  // R is live-in in B, if there exists a U(R), such that rdef(R) dom B
989  // and (U \in IDF(B) or B dom U).
990  //
991  // for (C : children) {
992  // LU = {}
993  // traverse(C, LU)
994  // LiveUses += LU
995  // }
996  //
997  // LiveUses -= Defs(B);
998  // LiveUses += UpwardExposedUses(B);
999  // for (C : IIDF[B])
1000  // for (U : LiveUses)
1001  // if (Rdef(U) dom C)
1002  // C.addLiveIn(U)
1003  //
1004 
1005  // Go up the dominator tree (depth-first).
1006  MachineDomTreeNode *N = MDT.getNode(B);
1007  for (auto I : *N) {
1008  RefMap L;
1009  MachineBasicBlock *SB = I->getBlock();
1010  traverse(SB, L);
1011 
1012  for (auto S : L)
1013  LiveIn[S.first].insert(S.second.begin(), S.second.end());
1014  }
1015 
1016  if (Trace) {
1017  dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
1018  << " after recursion into: {";
1019  for (auto I : *N)
1020  dbgs() << ' ' << I->getBlock()->getNumber();
1021  dbgs() << " }\n";
1022  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1023  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1024  }
1025 
1026  // Add reaching defs of phi uses that are live on exit from this block.
1027  RefMap &PUs = PhiLOX[B];
1028  for (auto &S : PUs)
1029  LiveIn[S.first].insert(S.second.begin(), S.second.end());
1030 
1031  if (Trace) {
1032  dbgs() << "after LOX\n";
1033  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1034  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1035  }
1036 
1037  // The LiveIn map at this point has all defs that are live-on-exit from B,
1038  // as if they were live-on-entry to B. First, we need to filter out all
1039  // defs that are present in this block. Then we will add reaching defs of
1040  // all upward-exposed uses.
1041 
1042  // To filter out the defs, first make a copy of LiveIn, and then re-populate
1043  // LiveIn with the defs that should remain.
1044  RefMap LiveInCopy = LiveIn;
1045  LiveIn.clear();
1046 
1047  for (const std::pair<const RegisterId, NodeRefSet> &LE : LiveInCopy) {
1048  RegisterRef LRef(LE.first);
1049  NodeRefSet &NewDefs = LiveIn[LRef.Reg]; // To be filled.
1050  const NodeRefSet &OldDefs = LE.second;
1051  for (NodeRef OR : OldDefs) {
1052  // R is a def node that was live-on-exit
1053  auto DA = DFG.addr<DefNode*>(OR.first);
1054  NodeAddr<InstrNode*> IA = DA.Addr->getOwner(DFG);
1055  NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
1056  if (B != BA.Addr->getCode()) {
1057  // Defs from a different block need to be preserved. Defs from this
1058  // block will need to be processed further, except for phi defs, the
1059  // liveness of which is handled through the PhiLON/PhiLOX maps.
1060  NewDefs.insert(OR);
1061  continue;
1062  }
1063 
1064  // Defs from this block need to stop the liveness from being
1065  // propagated upwards. This only applies to non-preserving defs,
1066  // and to the parts of the register actually covered by those defs.
1067  // (Note that phi defs should always be preserving.)
1068  RegisterAggr RRs(PRI);
1069  LRef.Mask = OR.second;
1070 
1071  if (!DFG.IsPreservingDef(DA)) {
1072  assert(!(IA.Addr->getFlags() & NodeAttrs::Phi));
1073  // DA is a non-phi def that is live-on-exit from this block, and
1074  // that is also located in this block. LRef is a register ref
1075  // whose use this def reaches. If DA covers LRef, then no part
1076  // of LRef is exposed upwards.
1077  if (RRs.insert(DA.Addr->getRegRef(DFG)).hasCoverOf(LRef))
1078  continue;
1079  }
1080 
1081  // DA itself was not sufficient to cover LRef. In general, it is
1082  // the last in a chain of aliased defs before the exit from this block.
1083  // There could be other defs in this block that are a part of that
1084  // chain. Check that now: accumulate the registers from these defs,
1085  // and if they all together cover LRef, it is not live-on-entry.
1086  for (NodeAddr<DefNode*> TA : getAllReachingDefs(DA)) {
1087  // DefNode -> InstrNode -> BlockNode.
1088  NodeAddr<InstrNode*> ITA = TA.Addr->getOwner(DFG);
1089  NodeAddr<BlockNode*> BTA = ITA.Addr->getOwner(DFG);
1090  // Reaching defs are ordered in the upward direction.
1091  if (BTA.Addr->getCode() != B) {
1092  // We have reached past the beginning of B, and the accumulated
1093  // registers are not covering LRef. The first def from the
1094  // upward chain will be live.
1095  // Subtract all accumulated defs (RRs) from LRef.
1096  RegisterRef T = RRs.clearIn(LRef);
1097  assert(T);
1098  NewDefs.insert({TA.Id,T.Mask});
1099  break;
1100  }
1101 
1102  // TA is in B. Only add this def to the accumulated cover if it is
1103  // not preserving.
1104  if (!(TA.Addr->getFlags() & NodeAttrs::Preserving))
1105  RRs.insert(TA.Addr->getRegRef(DFG));
1106  // If this is enough to cover LRef, then stop.
1107  if (RRs.hasCoverOf(LRef))
1108  break;
1109  }
1110  }
1111  }
1112 
1113  emptify(LiveIn);
1114 
1115  if (Trace) {
1116  dbgs() << "after defs in block\n";
1117  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1118  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1119  }
1120 
1121  // Scan the block for upward-exposed uses and add them to the tracking set.
1122  for (auto I : DFG.getFunc().Addr->findBlock(B, DFG).Addr->members(DFG)) {
1123  NodeAddr<InstrNode*> IA = I;
1124  if (IA.Addr->getKind() != NodeAttrs::Stmt)
1125  continue;
1126  for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
1127  if (UA.Addr->getFlags() & NodeAttrs::Undef)
1128  continue;
1129  RegisterRef RR = UA.Addr->getRegRef(DFG);
1130  for (NodeAddr<DefNode*> D : getAllReachingDefs(UA))
1131  if (getBlockWithRef(D.Id) != B)
1132  LiveIn[RR.Reg].insert({D.Id,RR.Mask});
1133  }
1134  }
1135 
1136  if (Trace) {
1137  dbgs() << "after uses in block\n";
1138  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1139  dbgs() << " Local: " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
1140  }
1141 
1142  // Phi uses should not be propagated up the dominator tree, since they
1143  // are not dominated by their corresponding reaching defs.
1144  RegisterAggr &Local = LiveMap[B];
1145  RefMap &LON = PhiLON[B];
1146  for (auto &R : LON) {
1147  LaneBitmask M;
1148  for (auto P : R.second)
1149  M |= P.second;
1150  Local.insert(RegisterRef(R.first,M));
1151  }
1152 
1153  if (Trace) {
1154  dbgs() << "after phi uses in block\n";
1155  dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
1156  dbgs() << " Local: " << Print<RegisterAggr>(Local, DFG) << '\n';
1157  }
1158 
1159  for (auto C : IIDF[B]) {
1160  RegisterAggr &LiveC = LiveMap[C];
1161  for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn)
1162  for (auto R : S.second)
1163  if (MDT.properlyDominates(getBlockWithRef(R.first), C))
1164  LiveC.insert(RegisterRef(S.first, R.second));
1165  }
1166 }
1167 
1168 void Liveness::emptify(RefMap &M) {
1169  for (auto I = M.begin(), E = M.end(); I != E; )
1170  I = I->second.empty() ? M.erase(I) : std::next(I);
1171 }