LLVM 23.0.0git
SILowerI1Copies.cpp
Go to the documentation of this file.
1//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
10// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
11// form and a wave-level control flow graph.
12//
13// Before this pass, values that are semantically i1 and are defined and used
14// within the same basic block are already represented as lane masks in scalar
15// registers. However, values that cross basic blocks are always transferred
16// between basic blocks in vreg_1 virtual registers and are lowered by this
17// pass.
18//
19// The only instructions that use or define vreg_1 virtual registers are COPY,
20// PHI, and IMPLICIT_DEF.
21//
22//===----------------------------------------------------------------------===//
23
24#include "SILowerI1Copies.h"
25#include "AMDGPU.h"
28
29#define DEBUG_TYPE "si-i1-copies"
30
31using namespace llvm;
32
33static Register
35 MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs);
36
37namespace {
38
39class Vreg1LoweringHelper : public AMDGPU::PhiLoweringHelper {
40public:
41 Vreg1LoweringHelper(MachineFunction *MF, MachineDominatorTree *DT,
43
44private:
45 DenseSet<Register> ConstrainRegs;
46
47public:
48 void markAsLaneMask(Register DstReg) const override;
49 void getCandidatesForLowering(
50 SmallVectorImpl<MachineInstr *> &Vreg1Phis) const override;
51 void collectIncomingValuesFromPhi(
52 const MachineInstr *MI,
53 SmallVectorImpl<AMDGPU::Incoming> &Incomings) const override;
54 void replaceDstReg(Register NewReg, Register OldReg,
55 MachineBasicBlock *MBB) override;
56 void buildMergeLaneMasks(MachineBasicBlock &MBB,
58 Register DstReg, Register PrevReg,
59 Register CurReg) override;
60 void constrainAsLaneMask(AMDGPU::Incoming &In) override;
61
62 bool lowerCopiesFromI1();
63 bool lowerCopiesToI1();
64 bool cleanConstrainRegs(bool Changed);
65 bool isVreg1(Register Reg) const {
66 return Reg.isVirtual() && MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
67 }
68};
69
70Vreg1LoweringHelper::Vreg1LoweringHelper(MachineFunction *MF,
73 : PhiLoweringHelper(MF, DT, PDT) {}
74
75bool Vreg1LoweringHelper::cleanConstrainRegs(bool Changed) {
76 assert(Changed || ConstrainRegs.empty());
77 for (Register Reg : ConstrainRegs)
78 MRI->constrainRegClass(Reg, TII->getRegisterInfo().getWaveMaskRegClass());
79 ConstrainRegs.clear();
80
81 return Changed;
82}
83
84/// Helper class that determines the relationship between incoming values of a
85/// phi in the control flow graph to determine where an incoming value can
86/// simply be taken as a scalar lane mask as-is, and where it needs to be
87/// merged with another, previously defined lane mask.
88///
89/// The approach is as follows:
90/// - Determine all basic blocks which, starting from the incoming blocks,
91/// a wave may reach before entering the def block (the block containing the
92/// phi).
93/// - If an incoming block has no predecessors in this set, we can take the
94/// incoming value as a scalar lane mask as-is.
95/// -- A special case of this is when the def block has a self-loop.
96/// - Otherwise, the incoming value needs to be merged with a previously
97/// defined lane mask.
98/// - If there is a path into the set of reachable blocks that does _not_ go
99/// through an incoming block where we can take the scalar lane mask as-is,
100/// we need to invent an available value for the SSAUpdater. Choices are
101/// 0 and undef, with differing consequences for how to merge values etc.
102///
103/// TODO: We could use region analysis to quickly skip over SESE regions during
104/// the traversal.
105///
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;
  const SIInstrInfo *TII;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  MapVector<MachineBasicBlock *, bool> ReachableMap;
  // Scratch DFS worklist; reused across phases of analyze() and required to
  // be empty on entry (see the assert there).
  SmallVector<MachineBasicBlock *, 4> Stack;
  // Blocks outside the reachable set that have a successor inside it; these
  // are where the SSA updater needs invented (undef) available values.
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT, const SIInstrInfo *TII)
      : PDT(PDT), TII(TII) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  /// Precondition: \p MBB must have been visited by the last analyze() call
  /// (the iterator returned by find() is dereferenced unchecked).
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  /// Unreachable predecessor blocks collected by the last analyze() call.
  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  /// Compute the set of blocks a wave may reach between the incoming blocks
  /// of a phi and its def block, and classify each incoming block as a
  /// "source" (value usable as-is) or not (value must be merged).
  void analyze(MachineBasicBlock &DefBlock,
               ArrayRef<AMDGPU::Incoming> Incomings) {
    assert(Stack.empty());
    ReachableMap.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);

    for (auto Incoming : Incomings) {
      MachineBasicBlock *MBB = Incoming.Block;
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      if (TII->hasDivergentBranch(MBB) && PDT.dominates(&DefBlock, MBB))
        append_range(Stack, MBB->successors());
    }

    // Forward DFS: everything reachable from the seeded successors without
    // passing an already-visited block (incl. DefBlock) joins the set.
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (ReachableMap.try_emplace(MBB, false).second)
        append_range(Stack, MBB->successors());
    }

    // Classification: a block with no predecessor inside the set is a
    // source. Otherwise, its predecessors outside the set (temporarily
    // stashed on Stack) are recorded once each in Predecessors.
    for (auto &[MBB, Reachable] : ReachableMap) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        Reachable = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (!llvm::is_contained(Predecessors, UnreachablePred))
            Predecessors.push_back(UnreachablePred);
        }
      }
      // Stack only held this block's outside predecessors; reset it for the
      // next iteration (and to satisfy the entry assert next time).
      Stack.clear();
    }
  }
};
180
181/// Helper class that detects loops which require us to lower an i1 COPY into
182/// bitwise manipulation.
183///
184/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
185/// between loops with the same header. Consider this example:
186///
187/// A-+-+
188/// | | |
189/// B-+ |
190/// | |
191/// C---+
192///
193/// A is the header of a loop containing A, B, and C as far as LoopInfo is
194/// concerned. However, an i1 COPY in B that is used in C must be lowered to
195/// bitwise operations to combine results from different loop iterations when
196/// B has a divergent branch (since by default we will compile this code such
197/// that threads in a wave are merged at the entry of C).
198///
199/// The following rule is implemented to determine whether bitwise operations
200/// are required: use the bitwise lowering for a def in block B if a backward
201/// edge to B is reachable without going through the nearest common
202/// post-dominator of B and all uses of the def.
203///
204/// TODO: This rule is conservative because it does not check whether the
205/// relevant branches are actually divergent.
206///
207/// The class is designed to cache the CFG traversal so that it can be re-used
208/// for multiple defs within the same basic block.
209///
210/// TODO: We could use region analysis to quickly skip over SESE regions during
211/// the traversal.
212///
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable block, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  // Worklist of blocks to visit at the current level.
  SmallVector<MachineBasicBlock *, 4> Stack;
  // Blocks discovered at the current level that lie beyond the current
  // visited post-dominator; they seed the next level's traversal.
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  /// Reset all cached traversal state and make \p MBB the def block. Must be
  /// called whenever the def block changes.
  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    // Walk up the post-dominator chain from the def block towards PostDom,
    // lazily expanding the traversal one level at a time.
    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineIDFSSAUpdater &SSAUpdater,
                      MachineRegisterInfo &MRI,
                      MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs,
                      ArrayRef<AMDGPU::Incoming> Incomings = {}) {
    assert(LoopLevel < CommonDominators.size());

    // Start from the cached common dominator of the loop level and widen it
    // to also dominate all incoming blocks.
    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (auto &Incoming : Incomings)
      Dom = DT.findNearestCommonDominator(Dom, Incoming.Block);

    if (!inLoopLevel(*Dom, LoopLevel, Incomings)) {
      SSAUpdater.addAvailableValue(
          Dom, insertUndefLaneMask(Dom, &MRI, LaneMaskRegAttrs));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Incomings))
          SSAUpdater.addAvailableValue(
              Pred, insertUndefLaneMask(Pred, &MRI, LaneMaskRegAttrs));
      }
    }
  }

private:
  /// Whether \p MBB was visited at a level <= \p LoopLevel, or is one of the
  /// explicitly given incoming blocks.
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<AMDGPU::Incoming> Incomings) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    for (auto &Incoming : Incomings)
      if (Incoming.Block == &MBB)
        return true;

    return false;
  }

  /// Expand the traversal by one level: visit every block reachable without
  /// going through the next post-dominator, updating Visited, the common
  /// dominator of the level, and FoundLoopLevel on back edges to DefBlock.
  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      // First level: seed the traversal with the def block itself.
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      // Step the visited post-dominator one node up the post-dom tree and
      // move the now-covered NextLevel blocks onto the worklist
      // (swap-with-back removal, order is irrelevant).
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      // Blocks beyond the current post-dominator are deferred to later
      // levels.
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          // Back edge to the def block: record the level at which it is
          // reachable (an edge from the post-dominator itself only counts at
          // the next level).
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};
370
371} // End anonymous namespace.
372
375 return MRI->createVirtualRegister(LaneMaskRegAttrs);
376}
377
378static Register
380 MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs) {
381 MachineFunction &MF = *MBB->getParent();
382 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
383 const SIInstrInfo *TII = ST.getInstrInfo();
384 Register UndefReg = AMDGPU::createLaneMaskReg(MRI, LaneMaskRegAttrs);
385 BuildMI(*MBB, MBB->getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
386 UndefReg);
387 return UndefReg;
388}
389
390#ifndef NDEBUG
392 const MachineRegisterInfo &MRI,
393 Register Reg) {
394 unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
395 return Size == 1 || Size == 32;
396}
397#endif
398
399bool Vreg1LoweringHelper::lowerCopiesFromI1() {
400 bool Changed = false;
401 SmallVector<MachineInstr *, 4> DeadCopies;
402
403 for (MachineBasicBlock &MBB : *MF) {
404 for (MachineInstr &MI : MBB) {
405 if (MI.getOpcode() != AMDGPU::COPY)
406 continue;
407
408 Register DstReg = MI.getOperand(0).getReg();
409 Register SrcReg = MI.getOperand(1).getReg();
410 if (!isVreg1(SrcReg))
411 continue;
412
413 if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
414 continue;
415
416 Changed = true;
417
418 // Copy into a 32-bit vector register.
419 LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
420 const DebugLoc &DL = MI.getDebugLoc();
421
423 assert(!MI.getOperand(0).getSubReg());
424
425 ConstrainRegs.insert(SrcReg);
426 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
427 .addImm(0)
428 .addImm(0)
429 .addImm(0)
430 .addImm(-1)
431 .addReg(SrcReg);
432 DeadCopies.push_back(&MI);
433 }
434
435 for (MachineInstr *MI : DeadCopies)
436 MI->eraseFromParent();
437 DeadCopies.clear();
438 }
439 return Changed;
440}
441
445 : MF(MF), DT(DT), PDT(PDT), ST(&MF->getSubtarget<GCNSubtarget>()),
447 MRI = &MF->getRegInfo();
448
449 TII = ST->getInstrInfo();
450}
451
453 LoopFinder LF(*DT, *PDT);
454 PhiIncomingAnalysis PIA(*PDT, TII);
456 SmallVector<Incoming, 4> Incomings;
457
458 getCandidatesForLowering(Vreg1Phis);
459 if (Vreg1Phis.empty())
460 return false;
461
462 DT->updateDFSNumbers();
463 MachineBasicBlock *PrevMBB = nullptr;
464 for (MachineInstr *MI : Vreg1Phis) {
465 MachineBasicBlock &MBB = *MI->getParent();
466 if (&MBB != PrevMBB) {
467 LF.initialize(MBB);
468 PrevMBB = &MBB;
469 }
470
471 LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);
472
473 Register DstReg = MI->getOperand(0).getReg();
474 markAsLaneMask(DstReg);
476
478
479 // Sort the incomings such that incoming values that dominate other incoming
480 // values are sorted earlier. This allows us to do some amount of on-the-fly
481 // constant folding.
482 // Incoming with smaller DFSNumIn goes first, DFSNumIn is 0 for entry block.
483 llvm::sort(Incomings, [this](Incoming LHS, Incoming RHS) {
484 return DT->getNode(LHS.Block)->getDFSNumIn() <
485 DT->getNode(RHS.Block)->getDFSNumIn();
486 });
487
488#ifndef NDEBUG
489 PhiRegisters.insert(DstReg);
490#endif
491
492 // Phis in a loop that are observed outside the loop receive a simple but
493 // conservatively correct treatment.
494 std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
495 for (MachineInstr &Use : MRI->use_instructions(DstReg))
496 DomBlocks.push_back(Use.getParent());
497
498 MachineBasicBlock *PostDomBound =
499 PDT->findNearestCommonDominator(DomBlocks);
500
501 // FIXME: This fails to find irreducible cycles. If we have a def (other
502 // than a constant) in a pair of blocks that end up looping back to each
503 // other, it will be mishandle. Due to structurization this shouldn't occur
504 // in practice.
505 unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
506
508 SSAUpdater.addUseBlock(&MBB);
509
510 if (FoundLoopLevel) {
511 LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs,
512 Incomings);
513
514 for (auto &Incoming : Incomings) {
515 SSAUpdater.addUseBlock(Incoming.Block);
517 SSAUpdater.addAvailableValue(Incoming.Block, Incoming.UpdatedReg);
518 }
519
520 SSAUpdater.calculate();
521
522 for (auto &Incoming : Incomings) {
526 SSAUpdater.getValueInMiddleOfBlock(&IMBB), Incoming.Reg);
527 }
528 } else {
529 // The phi is not observed from outside a loop. Use a more accurate
530 // lowering.
531 PIA.analyze(MBB, Incomings);
532
533 for (MachineBasicBlock *MBB : PIA.predecessors())
534 SSAUpdater.addAvailableValue(
536
537 for (auto &Incoming : Incomings) {
539 if (PIA.isSource(IMBB)) {
541 SSAUpdater.addAvailableValue(&IMBB, Incoming.Reg);
542 } else {
543 SSAUpdater.addUseBlock(&IMBB);
545 SSAUpdater.addAvailableValue(&IMBB, Incoming.UpdatedReg);
546 }
547 }
548
549 SSAUpdater.calculate();
550
551 for (auto &Incoming : Incomings) {
553 continue;
554
558 SSAUpdater.getValueInMiddleOfBlock(&IMBB), Incoming.Reg);
559 }
560 }
561
562 Register NewReg = SSAUpdater.getValueInMiddleOfBlock(&MBB);
563 if (NewReg != DstReg) {
564 replaceDstReg(NewReg, DstReg, &MBB);
565 MI->eraseFromParent();
566 }
567
568 Incomings.clear();
569 }
570 return true;
571}
572
573bool Vreg1LoweringHelper::lowerCopiesToI1() {
574 bool Changed = false;
575 LoopFinder LF(*DT, *PDT);
577
578 for (MachineBasicBlock &MBB : *MF) {
579 LF.initialize(MBB);
580
581 for (MachineInstr &MI : MBB) {
582 if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
583 MI.getOpcode() != AMDGPU::COPY)
584 continue;
585
586 Register DstReg = MI.getOperand(0).getReg();
587 if (!isVreg1(DstReg))
588 continue;
589
590 Changed = true;
591
592 if (MRI->use_empty(DstReg)) {
593 DeadCopies.push_back(&MI);
594 continue;
595 }
596
597 LLVM_DEBUG(dbgs() << "Lower Other: " << MI);
598
599 markAsLaneMask(DstReg);
600 initializeLaneMaskRegisterAttributes(DstReg);
601
602 if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
603 continue;
604
605 const DebugLoc &DL = MI.getDebugLoc();
606 Register SrcReg = MI.getOperand(1).getReg();
607 assert(!MI.getOperand(1).getSubReg());
608
609 if (!SrcReg.isVirtual() || (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
610 assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
611 Register TmpReg = AMDGPU::createLaneMaskReg(MRI, LaneMaskRegAttrs);
612 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
613 .addReg(SrcReg)
614 .addImm(0);
615 MI.getOperand(1).setReg(TmpReg);
616 SrcReg = TmpReg;
617 } else {
618 // SrcReg needs to be live beyond copy.
619 MI.getOperand(1).setIsKill(false);
620 }
621
622 // Defs in a loop that are observed outside the loop must be transformed
623 // into appropriate bit manipulation.
624 std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
625 for (MachineInstr &Use : MRI->use_instructions(DstReg))
626 DomBlocks.push_back(Use.getParent());
627
628 MachineBasicBlock *PostDomBound =
629 PDT->findNearestCommonDominator(DomBlocks);
630 unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
631 if (FoundLoopLevel) {
632 MachineIDFSSAUpdater SSAUpdater(*DT, *MF, DstReg);
633 SSAUpdater.addUseBlock(&MBB);
634 SSAUpdater.addAvailableValue(&MBB, DstReg);
635 LF.addLoopEntries(FoundLoopLevel, SSAUpdater, *MRI, LaneMaskRegAttrs);
636
637 SSAUpdater.calculate();
638 buildMergeLaneMasks(MBB, MI, DL, DstReg,
639 SSAUpdater.getValueInMiddleOfBlock(&MBB), SrcReg);
640 DeadCopies.push_back(&MI);
641 }
642 }
643
644 for (MachineInstr *MI : DeadCopies)
645 MI->eraseFromParent();
646 DeadCopies.clear();
647 }
648 return Changed;
649}
650
652 bool &Val) const {
653 const MachineInstr *MI;
654 for (;;) {
655 MI = MRI->getUniqueVRegDef(Reg);
656 if (MI->getOpcode() == AMDGPU::IMPLICIT_DEF)
657 return true;
658
659 if (MI->getOpcode() != AMDGPU::COPY)
660 break;
661
662 Reg = MI->getOperand(1).getReg();
663 if (!Reg.isVirtual())
664 return false;
665 if (!isLaneMaskReg(Reg))
666 return false;
667 }
668
669 if (MI->getOpcode() != LMC->MovOpc)
670 return false;
671
672 if (!MI->getOperand(1).isImm())
673 return false;
674
675 int64_t Imm = MI->getOperand(1).getImm();
676 if (Imm == 0) {
677 Val = false;
678 return true;
679 }
680 if (Imm == -1) {
681 Val = true;
682 return true;
683 }
684
685 return false;
686}
687
688static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
689 Def = false;
690 Use = false;
691
692 for (const MachineOperand &MO : MI.operands()) {
693 if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
694 if (MO.isUse())
695 Use = true;
696 else
697 Def = true;
698 }
699 }
700}
701
702/// Return a point at the end of the given \p MBB to insert SALU instructions
703/// for lane mask calculation. Take terminators and SCC into account.
706 auto InsertionPt = MBB.getFirstTerminator();
707 bool TerminatorsUseSCC = false;
708 for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
709 bool DefsSCC;
710 instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
711 if (TerminatorsUseSCC || DefsSCC)
712 break;
713 }
714
715 if (!TerminatorsUseSCC)
716 return InsertionPt;
717
718 while (InsertionPt != MBB.begin()) {
719 InsertionPt--;
720
721 bool DefSCC, UseSCC;
722 instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
723 if (DefSCC)
724 return InsertionPt;
725 }
726
727 // We should have at least seen an IMPLICIT_DEF or COPY
728 llvm_unreachable("SCC used by terminator but no def in block");
729}
730
// VReg_1 -> SReg_32 or SReg_64
// Retype \p DstReg from the vreg_1 class to the subtarget's boolean (lane
// mask) register class.
void Vreg1LoweringHelper::markAsLaneMask(Register DstReg) const {
  MRI->setRegClass(DstReg, ST->getBoolRC());
}
735
736void Vreg1LoweringHelper::getCandidatesForLowering(
737 SmallVectorImpl<MachineInstr *> &Vreg1Phis) const {
738 for (MachineBasicBlock &MBB : *MF) {
739 for (MachineInstr &MI : MBB.phis()) {
740 if (isVreg1(MI.getOperand(0).getReg()))
741 Vreg1Phis.push_back(&MI);
742 }
743 }
744}
745
746void Vreg1LoweringHelper::collectIncomingValuesFromPhi(
747 const MachineInstr *MI,
748 SmallVectorImpl<AMDGPU::Incoming> &Incomings) const {
749 for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
750 assert(i + 1 < MI->getNumOperands());
751 Register IncomingReg = MI->getOperand(i).getReg();
752 MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
753 MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);
754
755 if (IncomingDef->getOpcode() == AMDGPU::COPY) {
756 IncomingReg = IncomingDef->getOperand(1).getReg();
757 assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
758 assert(!IncomingDef->getOperand(1).getSubReg());
759 } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
760 continue;
761 } else {
762 assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
763 }
764
765 Incomings.emplace_back(IncomingReg, IncomingMBB, Register());
766 }
767}
768
// Note the argument order: all occurrences of NewReg are replaced WITH
// OldReg, i.e. the freshly lowered value is folded back into the phi's
// original register so existing uses are preserved. The MBB parameter is
// unused in this implementation.
void Vreg1LoweringHelper::replaceDstReg(Register NewReg, Register OldReg,
                                        MachineBasicBlock *MBB) {
  MRI->replaceRegWith(NewReg, OldReg);
}
773
774void Vreg1LoweringHelper::buildMergeLaneMasks(MachineBasicBlock &MBB,
776 const DebugLoc &DL,
777 Register DstReg, Register PrevReg,
778 Register CurReg) {
779 bool PrevVal = false;
780 bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
781 bool CurVal = false;
782 bool CurConstant = isConstantLaneMask(CurReg, CurVal);
783
784 if (PrevConstant && CurConstant) {
785 if (PrevVal == CurVal) {
786 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
787 } else if (CurVal) {
788 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(LMC->ExecReg);
789 } else {
790 BuildMI(MBB, I, DL, TII->get(LMC->XorOpc), DstReg)
791 .addReg(LMC->ExecReg)
792 .addImm(-1);
793 }
794 return;
795 }
796
797 Register PrevMaskedReg;
798 Register CurMaskedReg;
799 if (!PrevConstant) {
800 if (CurConstant && CurVal) {
801 PrevMaskedReg = PrevReg;
802 } else {
803 PrevMaskedReg = AMDGPU::createLaneMaskReg(MRI, LaneMaskRegAttrs);
804 BuildMI(MBB, I, DL, TII->get(LMC->AndN2Opc), PrevMaskedReg)
805 .addReg(PrevReg)
806 .addReg(LMC->ExecReg);
807 }
808 }
809 if (!CurConstant) {
810 // TODO: check whether CurReg is already masked by EXEC
811 if (PrevConstant && PrevVal) {
812 CurMaskedReg = CurReg;
813 } else {
814 CurMaskedReg = AMDGPU::createLaneMaskReg(MRI, LaneMaskRegAttrs);
815 BuildMI(MBB, I, DL, TII->get(LMC->AndOpc), CurMaskedReg)
816 .addReg(CurReg)
817 .addReg(LMC->ExecReg);
818 }
819 }
820
821 if (PrevConstant && !PrevVal) {
822 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
823 .addReg(CurMaskedReg);
824 } else if (CurConstant && !CurVal) {
825 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
826 .addReg(PrevMaskedReg);
827 } else if (PrevConstant && PrevVal) {
828 BuildMI(MBB, I, DL, TII->get(LMC->OrN2Opc), DstReg)
829 .addReg(CurMaskedReg)
830 .addReg(LMC->ExecReg);
831 } else {
832 BuildMI(MBB, I, DL, TII->get(LMC->OrOpc), DstReg)
833 .addReg(PrevMaskedReg)
834 .addReg(CurMaskedReg ? CurMaskedReg : LMC->ExecReg);
835 }
836}
837
// Intentionally a no-op: in the vreg_1 lowering path, incoming values need
// no additional register-class constraining beyond markAsLaneMask().
void Vreg1LoweringHelper::constrainAsLaneMask(AMDGPU::Incoming &In) {}
839
840/// Lower all instructions that def or use vreg_1 registers.
841///
842/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
843/// occur around inline assembly. We do this first, before vreg_1 registers
844/// are changed to scalar mask registers.
845///
846/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
847/// all others, because phi lowering looks through copies and can therefore
848/// often make copy lowering unnecessary.
851 // Only need to run this in SelectionDAG path.
852 if (MF.getProperties().hasSelected())
853 return false;
854
855 Vreg1LoweringHelper Helper(&MF, &MDT, &MPDT);
856 bool Changed = false;
857 Changed |= Helper.lowerCopiesFromI1();
858 Changed |= Helper.lowerPhis();
859 Changed |= Helper.lowerCopiesToI1();
860 return Helper.cleanConstrainRegs(Changed);
861}
862
863PreservedAnalyses
876
878public:
879 static char ID;
880
882
883 bool runOnMachineFunction(MachineFunction &MF) override;
884
885 StringRef getPassName() const override { return "SI Lower i1 Copies"; }
886
893};
894
902
904 false, false)
909
910char SILowerI1CopiesLegacy::ID = 0;
911
913
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define DEBUG_TYPE
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use)
static Register insertUndefLaneMask(MachineBasicBlock *MBB, MachineRegisterInfo *MRI, MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs)
static bool runFixI1Copies(MachineFunction &MF, MachineDominatorTree &MDT, MachinePostDominatorTree &MPDT)
Lower all instructions that def or use vreg_1 registers.
static bool isVRegCompatibleReg(const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI, Register Reg)
Interface definition of the PhiLoweringHelper class that implements lane mask merging algorithm for d...
#define LLVM_DEBUG(...)
Definition Debug.h:114
static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, const llvm::StringTable &StandardNames, VectorLibrary VecLib)
Initialize the set of available library functions based on the specified target triple.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
StringRef getPassName() const override
getPassName - Return a nice clean name for a pass.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
bool isLaneMaskReg(Register Reg) const
MachinePostDominatorTree * PDT
virtual void replaceDstReg(Register NewReg, Register OldReg, MachineBasicBlock *MBB)=0
MachineBasicBlock::iterator getSaluInsertionAtEnd(MachineBasicBlock &MBB) const
Return a point at the end of the given MBB to insert SALU instructions for lane mask calculation.
bool isConstantLaneMask(Register Reg, bool &Val) const
MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs
void initializeLaneMaskRegisterAttributes(Register LaneMask)
virtual void buildMergeLaneMasks(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, Register PrevReg, Register CurReg)=0
virtual void getCandidatesForLowering(SmallVectorImpl< MachineInstr * > &Vreg1Phis) const =0
const AMDGPU::LaneMaskConstants * LMC
DenseSet< Register > PhiRegisters
virtual void markAsLaneMask(Register DstReg) const =0
virtual void constrainAsLaneMask(Incoming &In)=0
virtual void collectIncomingValuesFromPhi(const MachineInstr *MI, SmallVectorImpl< Incoming > &Incomings) const =0
PhiLoweringHelper(MachineFunction *MF, MachineDominatorTree *DT, MachinePostDominatorTree *PDT)
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
A debug info location.
Definition DebugLoc.h:123
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
const HexagonRegisterInfo & getRegisterInfo() const
iterator_range< succ_iterator > successors()
iterator_range< pred_iterator > predecessors()
MachineInstrBundleIterator< MachineInstr > iterator
Analysis pass which computes a MachineDominatorTree.
Analysis pass which computes a MachineDominatorTree.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register getValueInMiddleOfBlock(MachineBasicBlock *BB)
See SSAUpdater::GetValueInMiddleOfBlock description.
void addAvailableValue(MachineBasicBlock *BB, Register V)
Indicate that a rewritten value is available in the specified block with the specified value.
void calculate()
Calculate and insert necessary PHI nodes for SSA form.
void addUseBlock(MachineBasicBlock *BB)
Record a basic block that uses the value.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
Register getReg() const
getReg - Returns the register number.
MachinePostDominatorTree - an analysis pass wrapper for DominatorTree used to compute the post-domina...
LLVM_ABI MachineBasicBlock * findNearestCommonDominator(ArrayRef< MachineBasicBlock * > Blocks) const
Returns the nearest common dominator of the given blocks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
iterator_range< use_instr_iterator > use_instructions(Register Reg) const
bool use_empty(Register RegNo) const
use_empty - Return true if there are no instructions using the specified register.
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
iterator find(const KeyT &Key)
Definition MapVector.h:154
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition MapVector.h:116
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isValid() const
Definition Register.h:112
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition SSAUpdater.h:39
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Register createLaneMaskReg(MachineRegisterInfo *MRI, MachineRegisterInfo::VRegAttrs LaneMaskRegAttrs)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
DomTreeNodeBase< MachineBasicBlock > MachineDomTreeNode
ArrayRef(const T &OneElt) -> ArrayRef< T >
FunctionPass * createSILowerI1CopiesLegacyPass()
char & SILowerI1CopiesLegacyID
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
MachineBasicBlock * Block
All attributes(register class or bank and low-level type) a virtual register can have.