LLVM  9.0.0svn
CombinerHelper.cpp
Go to the documentation of this file.
1 //===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
16 
17 #define DEBUG_TYPE "gi-combiner"
18 
19 using namespace llvm;
20 
23  : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}
24 
26  unsigned ToReg) const {
27  Observer.changingAllUsesOfReg(MRI, FromReg);
28 
29  if (MRI.constrainRegAttrs(ToReg, FromReg))
30  MRI.replaceRegWith(FromReg, ToReg);
31  else
32  Builder.buildCopy(ToReg, FromReg);
33 
35 }
36 
38  MachineOperand &FromRegOp,
39  unsigned ToReg) const {
40  assert(FromRegOp.getParent() && "Expected an operand in an MI");
41  Observer.changingInstr(*FromRegOp.getParent());
42 
43  FromRegOp.setReg(ToReg);
44 
45  Observer.changedInstr(*FromRegOp.getParent());
46 }
47 
49  if (matchCombineCopy(MI)) {
50  applyCombineCopy(MI);
51  return true;
52  }
53  return false;
54 }
56  if (MI.getOpcode() != TargetOpcode::COPY)
57  return false;
58  unsigned DstReg = MI.getOperand(0).getReg();
59  unsigned SrcReg = MI.getOperand(1).getReg();
60  LLT DstTy = MRI.getType(DstReg);
61  LLT SrcTy = MRI.getType(SrcReg);
62  // Simple Copy Propagation.
63  // a(sx) = COPY b(sx) -> Replace all uses of a with b.
64  if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy)
65  return true;
66  return false;
67 }
69  unsigned DstReg = MI.getOperand(0).getReg();
70  unsigned SrcReg = MI.getOperand(1).getReg();
71  MI.eraseFromParent();
72  replaceRegWith(MRI, DstReg, SrcReg);
73 }
74 
75 namespace {
76 
77 /// Select a preference between two uses. CurrentUse is the current preference
78 /// while *ForCandidate is attributes of the candidate under consideration.
79 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
80  const LLT &TyForCandidate,
81  unsigned OpcodeForCandidate,
82  MachineInstr *MIForCandidate) {
83  if (!CurrentUse.Ty.isValid()) {
84  if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
85  CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
86  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
87  return CurrentUse;
88  }
89 
90  // We permit the extend to hoist through basic blocks but this is only
91  // sensible if the target has extending loads. If you end up lowering back
92  // into a load and extend during the legalizer then the end result is
93  // hoisting the extend up to the load.
94 
95  // Prefer defined extensions to undefined extensions as these are more
96  // likely to reduce the number of instructions.
97  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
98  CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
99  return CurrentUse;
100  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
101  OpcodeForCandidate != TargetOpcode::G_ANYEXT)
102  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
103 
104  // Prefer sign extensions to zero extensions as sign-extensions tend to be
105  // more expensive.
106  if (CurrentUse.Ty == TyForCandidate) {
107  if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
108  OpcodeForCandidate == TargetOpcode::G_ZEXT)
109  return CurrentUse;
110  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
111  OpcodeForCandidate == TargetOpcode::G_SEXT)
112  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
113  }
114 
115  // This is potentially target specific. We've chosen the largest type
116  // because G_TRUNC is usually free. One potential catch with this is that
117  // some targets have a reduced number of larger registers than smaller
118  // registers and this choice potentially increases the live-range for the
119  // larger value.
120  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
121  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
122  }
123  return CurrentUse;
124 }
125 
126 /// Find a suitable place to insert some instructions and insert them. This
127 /// function accounts for special cases like inserting before a PHI node.
128 /// The current strategy for inserting before PHI's is to duplicate the
129 /// instructions for each predecessor. However, while that's ok for G_TRUNC
130 /// on most targets since it generally requires no code, other targets/cases may
131 /// want to try harder to find a dominating block.
132 static void InsertInsnsWithoutSideEffectsBeforeUse(
// NOTE(review): the parameter lines (source lines 133-134) are missing from
// this dump. Judging from the call sites later in the file, the parameters are
// the MachineIRBuilder, the defining MachineInstr (DefMI), the use operand
// (UseMO), and an Inserter callback taking (MachineBasicBlock *,
// MachineBasicBlock::iterator) -- confirm against the original source.
135  Inserter) {
136  MachineInstr &UseMI = *UseMO.getParent();
137 
138  MachineBasicBlock *InsertBB = UseMI.getParent();
139 
140  // If the use is a PHI then we want the predecessor block instead.
141  if (UseMI.isPHI()) {
// In a PHI, the basic-block operand immediately follows its paired register
// use operand, hence std::next on the operand pointer.
142  MachineOperand *PredBB = std::next(&UseMO);
143  InsertBB = PredBB->getMBB();
144  }
145 
146  // If the block is the same block as the def then we want to insert just after
147  // the def instead of at the start of the block.
148  if (InsertBB == DefMI.getParent()) {
// NOTE(review): source line 149, which established InsertPt (presumably an
// iterator at DefMI), is missing from this dump -- confirm against the
// original source.
150  Inserter(InsertBB, std::next(InsertPt));
151  return;
152  }
153 
154  // Otherwise we want the start of the BB
155  Inserter(InsertBB, InsertBB->getFirstNonPHI());
156 }
157 } // end anonymous namespace
158 
160  PreferredTuple Preferred;
161  if (matchCombineExtendingLoads(MI, Preferred)) {
162  applyCombineExtendingLoads(MI, Preferred);
163  return true;
164  }
165  return false;
166 }
167 
169  PreferredTuple &Preferred) {
170  // We match the loads and follow the uses to the extend instead of matching
171  // the extends and following the def to the load. This is because the load
172  // must remain in the same position for correctness (unless we also add code
173  // to find a safe place to sink it) whereas the extend is freely movable.
174  // It also prevents us from duplicating the load for the volatile case or just
175  // for performance.
176 
177  if (MI.getOpcode() != TargetOpcode::G_LOAD &&
178  MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
179  MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
180  return false;
181 
182  auto &LoadValue = MI.getOperand(0);
183  assert(LoadValue.isReg() && "Result wasn't a register?");
184 
185  LLT LoadValueTy = MRI.getType(LoadValue.getReg());
186  if (!LoadValueTy.isScalar())
187  return false;
188 
189  // Most architectures are going to legalize <s8 loads into at least a 1 byte
190  // load, and the MMOs can only describe memory accesses in multiples of bytes.
191  // If we try to perform extload combining on those, we can end up with
192  // %a(s8) = extload %ptr (load 1 byte from %ptr)
193  // ... which is an illegal extload instruction.
194  if (LoadValueTy.getSizeInBits() < 8)
195  return false;
196 
197  // For non power-of-2 types, they will very likely be legalized into multiple
198  // loads. Don't bother trying to match them into extending loads.
199  if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
200  return false;
201 
202  // Find the preferred type aside from the any-extends (unless it's the only
203  // one) and non-extending ops. We'll emit an extending load to that type and
204  // and emit a variant of (extend (trunc X)) for the others according to the
205  // relative type sizes. At the same time, pick an extend to use based on the
206  // extend involved in the chosen type.
207  unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
208  ? TargetOpcode::G_ANYEXT
209  : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
210  ? TargetOpcode::G_SEXT
211  : TargetOpcode::G_ZEXT;
212  Preferred = {LLT(), PreferredOpcode, nullptr};
213  for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
214  if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
215  UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
216  UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
217  Preferred = ChoosePreferredUse(Preferred,
218  MRI.getType(UseMI.getOperand(0).getReg()),
219  UseMI.getOpcode(), &UseMI);
220  }
221  }
222 
223  // There were no extends
224  if (!Preferred.MI)
225  return false;
226  // It should be impossible to chose an extend without selecting a different
227  // type since by definition the result of an extend is larger.
228  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
229 
230  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
231  return true;
232 }
233 
// NOTE(review): this dump omits source line 234 -- per the index this is the
// tail of `void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
// PreferredTuple &MatchInfo)`; confirm against the original source.
235  PreferredTuple &Preferred) {
// Records a scheduled insertion: which use operand to rewrite and where the
// replacement instruction should be built.
236  struct InsertionPoint {
237  MachineOperand *UseMO;
238  MachineBasicBlock *InsertIntoBB;
239  MachineBasicBlock::iterator InsertBefore;
240  InsertionPoint(MachineOperand *UseMO, MachineBasicBlock *InsertIntoBB,
241  MachineBasicBlock::iterator InsertBefore)
242  : UseMO(UseMO), InsertIntoBB(InsertIntoBB), InsertBefore(InsertBefore) {
243  }
244  };
245 
246  // Rewrite the load to the chosen extending load.
247  unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
248  Observer.changingInstr(MI);
// Map the preferred extend opcode to the matching extending-load opcode;
// G_ANYEXT keeps the plain G_LOAD descriptor.
249  MI.setDesc(
250  Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
251  ? TargetOpcode::G_SEXTLOAD
252  : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
253  ? TargetOpcode::G_ZEXTLOAD
254  : TargetOpcode::G_LOAD));
255 
256  // Rewrite all the uses to fix up the types.
// Insertions and erasures are scheduled rather than done immediately --
// apparently so the use_operands() iteration below isn't invalidated
// mid-loop (TODO confirm).
257  SmallVector<MachineInstr *, 1> ScheduleForErase;
258  SmallVector<InsertionPoint, 4> ScheduleForInsert;
259  auto &LoadValue = MI.getOperand(0);
260  for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) {
261  MachineInstr *UseMI = UseMO.getParent();
262 
263  // If the extend is compatible with the preferred extend then we should fix
264  // up the type and extend so that it uses the preferred use.
265  if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
266  UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
267  unsigned UseDstReg = UseMI->getOperand(0).getReg();
268  MachineOperand &UseSrcMO = UseMI->getOperand(1);
269  const LLT &UseDstTy = MRI.getType(UseDstReg);
270  if (UseDstReg != ChosenDstReg) {
271  if (Preferred.Ty == UseDstTy) {
272  // If the use has the same type as the preferred use, then merge
273  // the vregs and erase the extend. For example:
274  // %1:_(s8) = G_LOAD ...
275  // %2:_(s32) = G_SEXT %1(s8)
276  // %3:_(s32) = G_ANYEXT %1(s8)
277  // ... = ... %3(s32)
278  // rewrites to:
279  // %2:_(s32) = G_SEXTLOAD ...
280  // ... = ... %2(s32)
281  replaceRegWith(MRI, UseDstReg, ChosenDstReg);
282  ScheduleForErase.push_back(UseMO.getParent());
283  } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
284  // If the preferred size is smaller, then keep the extend but extend
285  // from the result of the extending load. For example:
286  // %1:_(s8) = G_LOAD ...
287  // %2:_(s32) = G_SEXT %1(s8)
288  // %3:_(s64) = G_ANYEXT %1(s8)
289  // ... = ... %3(s64)
290  /// rewrites to:
291  // %2:_(s32) = G_SEXTLOAD ...
292  // %3:_(s64) = G_ANYEXT %2:_(s32)
293  // ... = ... %3(s64)
294  replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
295  } else {
296  // If the preferred size is large, then insert a truncate. For
297  // example:
298  // %1:_(s8) = G_LOAD ...
299  // %2:_(s64) = G_SEXT %1(s8)
300  // %3:_(s32) = G_ZEXT %1(s8)
301  // ... = ... %3(s32)
302  /// rewrites to:
303  // %2:_(s64) = G_SEXTLOAD ...
304  // %4:_(s8) = G_TRUNC %2:_(s32)
305  // %3:_(s64) = G_ZEXT %2:_(s8)
306  // ... = ... %3(s64)
307  InsertInsnsWithoutSideEffectsBeforeUse(
308  Builder, MI, UseMO,
309  [&](MachineBasicBlock *InsertIntoBB,
310  MachineBasicBlock::iterator InsertBefore) {
311  ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
312  });
313  }
314  continue;
315  }
316  // The use is (one of) the uses of the preferred use we chose earlier.
317  // We're going to update the load to def this value later so just erase
318  // the old extend.
319  ScheduleForErase.push_back(UseMO.getParent());
320  continue;
321  }
322 
323  // The use isn't an extend. Truncate back to the type we originally loaded.
324  // This is free on many targets.
325  InsertInsnsWithoutSideEffectsBeforeUse(
326  Builder, MI, UseMO,
327  [&](MachineBasicBlock *InsertIntoBB,
328  MachineBasicBlock::iterator InsertBefore) {
329  ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
330  });
331  }
332 
// NOTE(review): source line 333 is missing from this dump; the lookup and
// assignment below imply it declared EmittedInsns, presumably a
// DenseMap<MachineBasicBlock *, MachineInstr *> memoizing one emitted G_TRUNC
// per block -- confirm against the original source.
334  for (auto &InsertionInfo : ScheduleForInsert) {
335  MachineOperand *UseMO = InsertionInfo.UseMO;
336  MachineBasicBlock *InsertIntoBB = InsertionInfo.InsertIntoBB;
337  MachineBasicBlock::iterator InsertBefore = InsertionInfo.InsertBefore;
338 
// Reuse a truncate already emitted into this block instead of emitting a
// duplicate; just point the use at its result.
339  MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
340  if (PreviouslyEmitted) {
341  Observer.changingInstr(*UseMO->getParent());
342  UseMO->setReg(PreviouslyEmitted->getOperand(0).getReg());
343  Observer.changedInstr(*UseMO->getParent());
344  continue;
345  }
346 
// First truncate needed in this block: build it at the scheduled point and
// remember it for later uses in the same block.
347  Builder.setInsertPt(*InsertIntoBB, InsertBefore);
348  unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
349  MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
350  EmittedInsns[InsertIntoBB] = NewMI;
351  replaceRegOpWith(MRI, *UseMO, NewDstReg);
352  }
// Erase the folded-away extends, notifying the observer first.
353  for (auto &EraseMI : ScheduleForErase) {
354  Observer.erasingInstr(*EraseMI);
355  EraseMI->eraseFromParent();
356  }
// Finally make the (now extending) load define the chosen destination vreg.
357  MI.getOperand(0).setReg(ChosenDstReg);
358  Observer.changedInstr(MI);
359 }
360 
362  if (tryCombineCopy(MI))
363  return true;
364  return tryCombineExtendingLoads(MI);
365 }
unsigned getReg(unsigned Idx) const
Get the register for the operand index.
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:645
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo)
MachineBasicBlock * getMBB() const
This class represents lattice values for constants.
Definition: AllocatorList.h:23
unsigned getReg() const
getReg - Returns the register number.
bool tryCombine(MachineInstr &MI)
Try to transform MI by using all of the above combine functions.
bool constrainRegAttrs(unsigned Reg, unsigned ConstrainingReg, unsigned MinNumRegs=0)
Constrain the register class or the register bank of the virtual register Reg (and low-level type) to...
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
bool isPHI() const
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B)
void applyCombineCopy(MachineInstr &MI)
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:408
void finishedChangingAllUsesOfReg()
All instructions reported as changing by changingAllUsesOfReg() have finished being changed...
Abstract class that contains various methods for clients to notify about changes. ...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
unsigned const MachineRegisterInfo * MRI
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:428
void replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg, unsigned ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
MachineInstrBuilder & UseMI
Helper class to build MachineInstr.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, unsigned ToReg) const
Replace a single register operand with a new register and inform the observer of the changes...
bool isValid() const
bool tryCombineExtendingLoads(MachineInstr &MI)
If MI is extend that consumes the result of a load, try to combine it.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
bool tryCombineCopy(MachineInstr &MI)
If MI is COPY, try to combine it.
void changingAllUsesOfReg(const MachineRegisterInfo &MRI, unsigned Reg)
All the instructions using the given register are being changed.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:841
MachineInstrBuilder MachineInstrBuilder & DefMI
bool matchCombineCopy(MachineInstr &MI)
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
MachineInstr * MI
This file declares the MachineIRBuilder class.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
void replaceRegWith(unsigned FromReg, unsigned ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:253
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Representation of each machine instruction.
Definition: MachineInstr.h:63
void setReg(unsigned Reg)
Change the register this operand corresponds to.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
iterator_range< use_instr_iterator > use_instructions(unsigned Reg) const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
print Print MemDeps of function
IRTranslator LLVM IR MI
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:413
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.