1 //===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "llvm/ADT/SetVector.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/Support/Casting.h"
34 #include <tuple>
35 
36 #define DEBUG_TYPE "gi-combiner"
37 
38 using namespace llvm;
39 using namespace MIPatternMatch;
40 
41 // Option to allow testing of the combiner while no targets know about indexed
42 // addressing.
43 static cl::opt<bool>
44  ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
45  cl::desc("Force all indexed operations to be "
46  "legal for the GlobalISel combiner"));
47 
51  const LegalizerInfo *LI)
52  : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
53  MDT(MDT), LI(LI), RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
54  TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
55  (void)this->KB;
56 }
57 
60 }
61 
62 /// \returns The little endian in-memory byte position of byte \p I in a
63 /// \p ByteWidth bytes wide type.
64 ///
65 /// E.g. Given a 4-byte type x, x[0] -> byte 0
66 static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
67  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
68  return I;
69 }
70 
71 /// \returns The big endian in-memory byte position of byte \p I in a
72 /// \p ByteWidth bytes wide type.
73 ///
74 /// E.g. Given a 4-byte type x, x[0] -> byte 3
75 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
76  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
77  return ByteWidth - I - 1;
78 }
79 
80 /// Given a map from byte offsets in memory to indices in a load/store,
81 /// determine if that map corresponds to a little or big endian byte pattern.
82 ///
83 /// \param MemOffset2Idx maps memory offsets to address offsets.
84 /// \param LowestIdx is the lowest index in \p MemOffset2Idx.
85 ///
86 /// \returns true if the map corresponds to a big endian byte pattern, false
87 /// if it corresponds to a little endian byte pattern, and None otherwise.
88 ///
89 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
90 /// are as follows:
91 ///
92 /// AddrOffset  Little endian  Big endian
93 ///     0             0            3
94 ///     1             1            2
95 ///     2             2            1
96 ///     3             3            0
97 static Optional<bool>
99  int64_t LowestIdx) {
100  // Need at least two byte positions to decide on endianness.
101  unsigned Width = MemOffset2Idx.size();
102  if (Width < 2)
103  return None;
104  bool BigEndian = true, LittleEndian = true;
105  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
106  auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
107  if (MemOffsetAndIdx == MemOffset2Idx.end())
108  return None;
109  const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
110  assert(Idx >= 0 && "Expected non-negative byte offset?");
111  LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
112  BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
113  if (!BigEndian && !LittleEndian)
114  return None;
115  }
116 
117  assert((BigEndian != LittleEndian) &&
118  "Pattern cannot be both big and little endian!");
119  return BigEndian;
120 }
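// For illustration (a sketch, not from the original source): with a 4-byte
// access and LowestIdx == 0, the map {0->0, 1->1, 2->2, 3->3} is recognised as
// little endian (returns false), {0->3, 1->2, 2->1, 3->0} as big endian
// (returns true), and anything else, e.g. {0->2, 1->0, ...}, yields None.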
121 
123  const LegalityQuery &Query) const {
124  return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
125 }
126 
128  Register ToReg) const {
130 
131  if (MRI.constrainRegAttrs(ToReg, FromReg))
132  MRI.replaceRegWith(FromReg, ToReg);
133  else
134  Builder.buildCopy(ToReg, FromReg);
135 
137 }
138 
140  MachineOperand &FromRegOp,
141  Register ToReg) const {
142  assert(FromRegOp.getParent() && "Expected an operand in an MI");
143  Observer.changingInstr(*FromRegOp.getParent());
144 
145  FromRegOp.setReg(ToReg);
146 
147  Observer.changedInstr(*FromRegOp.getParent());
148 }
149 
151  return RBI->getRegBank(Reg, MRI, *TRI);
152 }
153 
155  if (RegBank)
156  MRI.setRegBank(Reg, *RegBank);
157 }
158 
160  if (matchCombineCopy(MI)) {
162  return true;
163  }
164  return false;
165 }
167  if (MI.getOpcode() != TargetOpcode::COPY)
168  return false;
169  Register DstReg = MI.getOperand(0).getReg();
170  Register SrcReg = MI.getOperand(1).getReg();
171  return canReplaceReg(DstReg, SrcReg, MRI);
172 }
174  Register DstReg = MI.getOperand(0).getReg();
175  Register SrcReg = MI.getOperand(1).getReg();
176  MI.eraseFromParent();
177  replaceRegWith(MRI, DstReg, SrcReg);
178 }
179 
181  bool IsUndef = false;
183  if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
184  applyCombineConcatVectors(MI, IsUndef, Ops);
185  return true;
186  }
187  return false;
188 }
189 
192  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
193  "Invalid instruction");
194  IsUndef = true;
195  MachineInstr *Undef = nullptr;
196 
197  // Walk over all the operands of concat vectors and check if they are
198  // build_vector themselves or undef.
199  // Then collect their operands in Ops.
200  for (const MachineOperand &MO : MI.uses()) {
201  Register Reg = MO.getReg();
203  assert(Def && "Operand not defined");
204  switch (Def->getOpcode()) {
205  case TargetOpcode::G_BUILD_VECTOR:
206  IsUndef = false;
207  // Remember the operands of the build_vector to fold
208  // them into the yet-to-build flattened concat vectors.
209  for (const MachineOperand &BuildVecMO : Def->uses())
210  Ops.push_back(BuildVecMO.getReg());
211  break;
212  case TargetOpcode::G_IMPLICIT_DEF: {
213  LLT OpType = MRI.getType(Reg);
214  // Keep one undef value for all the undef operands.
215  if (!Undef) {
216  Builder.setInsertPt(*MI.getParent(), MI);
218  }
219  assert(MRI.getType(Undef->getOperand(0).getReg()) ==
220  OpType.getScalarType() &&
221  "All undefs should have the same type");
222  // Break the undef vector in as many scalar elements as needed
223  // for the flattening.
224  for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
225  EltIdx != EltEnd; ++EltIdx)
226  Ops.push_back(Undef->getOperand(0).getReg());
227  break;
228  }
229  default:
230  return false;
231  }
232  }
233  return true;
234 }
236  MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
237  // We determined that the concat_vectors can be flattened.
238  // Generate the flattened build_vector.
239  Register DstReg = MI.getOperand(0).getReg();
240  Builder.setInsertPt(*MI.getParent(), MI);
241  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
242 
243  // Note: IsUndef is sort of redundant. We could have determined it by
244  // checking that all Ops are undef. Alternatively, we could have
245  // generated a build_vector of undefs and relied on another combine to
246  // clean that up. For now, given we already gather this information
247  // in tryCombineConcatVectors, just save compile time and issue the
248  // right thing.
249  if (IsUndef)
250  Builder.buildUndef(NewDstReg);
251  else
252  Builder.buildBuildVector(NewDstReg, Ops);
253  MI.eraseFromParent();
254  replaceRegWith(MRI, DstReg, NewDstReg);
255 }
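// Illustrative example of the flattening above (hypothetical registers):
//   %a:_(<2 x s32>) = G_BUILD_VECTOR %x, %y
//   %b:_(<2 x s32>) = G_BUILD_VECTOR %z, %w
//   %d:_(<4 x s32>) = G_CONCAT_VECTORS %a, %b
// becomes
//   %d:_(<4 x s32>) = G_BUILD_VECTOR %x, %y, %z, %w
// and a concat of only G_IMPLICIT_DEF operands becomes a single G_IMPLICIT_DEF.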
256 
259  if (matchCombineShuffleVector(MI, Ops)) {
261  return true;
262  }
263  return false;
264 }
265 
268  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
269  "Invalid instruction kind");
270  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
271  Register Src1 = MI.getOperand(1).getReg();
272  LLT SrcType = MRI.getType(Src1);
273  // As bizarre as it may look, a shuffle vector can actually produce a
274  // scalar! This is because at the IR level a <1 x ty> shuffle
275  // vector is perfectly valid.
276  unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
277  unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
278 
279  // If the resulting vector is smaller than the size of the source
280  // vectors being concatenated, we won't be able to replace the
281  // shuffle vector with a concat_vectors.
282  //
283  // Note: We may still be able to produce a concat_vectors fed by
284  // extract_vector_elt and so on. It is less clear that would
285  // be better though, so don't bother for now.
286  //
287  // If the destination is a scalar, the size of the sources doesn't
288  // matter; we will lower the shuffle to a plain copy. This will
289  // work only if the source and destination have the same size. But
290  // that's covered by the next condition.
291  //
292  // TODO: If the sizes of the source and destination don't match
293  // we could still emit an extract vector element in that case.
294  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
295  return false;
296 
297  // Check that the shuffle mask can be broken evenly between the
298  // different sources.
299  if (DstNumElts % SrcNumElts != 0)
300  return false;
301 
302  // Mask length is a multiple of the source vector length.
303  // Check if the shuffle is some kind of concatenation of the input
304  // vectors.
305  unsigned NumConcat = DstNumElts / SrcNumElts;
306  SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
307  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
308  for (unsigned i = 0; i != DstNumElts; ++i) {
309  int Idx = Mask[i];
310  // Undef value.
311  if (Idx < 0)
312  continue;
313  // Ensure the indices in each SrcType sized piece are sequential and that
314  // the same source is used for the whole piece.
315  if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
316  (ConcatSrcs[i / SrcNumElts] >= 0 &&
317  ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
318  return false;
319  // Remember which source this index came from.
320  ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
321  }
322 
323  // The shuffle is concatenating multiple vectors together.
324  // Collect the different operands for that.
325  Register UndefReg;
326  Register Src2 = MI.getOperand(2).getReg();
327  for (auto Src : ConcatSrcs) {
328  if (Src < 0) {
329  if (!UndefReg) {
330  Builder.setInsertPt(*MI.getParent(), MI);
331  UndefReg = Builder.buildUndef(SrcType).getReg(0);
332  }
333  Ops.push_back(UndefReg);
334  } else if (Src == 0)
335  Ops.push_back(Src1);
336  else
337  Ops.push_back(Src2);
338  }
339  return true;
340 }
341 
343  const ArrayRef<Register> Ops) {
344  Register DstReg = MI.getOperand(0).getReg();
345  Builder.setInsertPt(*MI.getParent(), MI);
346  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
347 
348  if (Ops.size() == 1)
349  Builder.buildCopy(NewDstReg, Ops[0]);
350  else
351  Builder.buildMerge(NewDstReg, Ops);
352 
353  MI.eraseFromParent();
354  replaceRegWith(MRI, DstReg, NewDstReg);
355 }
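// Illustrative example (hypothetical registers): a shuffle whose mask simply
// concatenates its inputs, e.g.
//   %d:_(<4 x s32>) = G_SHUFFLE_VECTOR %v1(<2 x s32>), %v2, shufflemask(0,1,2,3)
// is rebuilt here as a merge of the collected operands, conceptually
//   %d:_(<4 x s32>) = G_CONCAT_VECTORS %v1(<2 x s32>), %v2(<2 x s32>)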
356 
357 namespace {
358 
359 /// Select a preference between two uses. CurrentUse is the current preference
360 /// while *ForCandidate are the attributes of the candidate under consideration.
361 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
362  const LLT TyForCandidate,
363  unsigned OpcodeForCandidate,
364  MachineInstr *MIForCandidate) {
365  if (!CurrentUse.Ty.isValid()) {
366  if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
367  CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
368  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
369  return CurrentUse;
370  }
371 
372  // We permit the extend to hoist through basic blocks but this is only
373  // sensible if the target has extending loads. If you end up lowering back
374  // into a load and extend during the legalizer then the end result is
375  // hoisting the extend up to the load.
376 
377  // Prefer defined extensions to undefined extensions as these are more
378  // likely to reduce the number of instructions.
379  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
380  CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
381  return CurrentUse;
382  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
383  OpcodeForCandidate != TargetOpcode::G_ANYEXT)
384  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
385 
386  // Prefer sign extensions to zero extensions as sign-extensions tend to be
387  // more expensive, so folding them into the load eliminates the costlier instruction.
388  if (CurrentUse.Ty == TyForCandidate) {
389  if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
390  OpcodeForCandidate == TargetOpcode::G_ZEXT)
391  return CurrentUse;
392  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
393  OpcodeForCandidate == TargetOpcode::G_SEXT)
394  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
395  }
396 
397  // This is potentially target specific. We've chosen the largest type
398  // because G_TRUNC is usually free. One potential catch with this is that
399  // some targets have a reduced number of larger registers than smaller
400  // registers and this choice potentially increases the live-range for the
401  // larger value.
402  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
403  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
404  }
405  return CurrentUse;
406 }
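// For illustration (a sketch of the preference order above): with a current
// preference of {s32, G_ZEXT}, a candidate {s32, G_SEXT} is chosen (sign
// extension preferred at equal size), and a candidate {s64, G_ZEXT} is chosen
// as well (larger type preferred), while a G_ANYEXT candidate is rejected in
// favour of the already-seen defined extension.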
407 
408 /// Find a suitable place to insert some instructions and insert them. This
409 /// function accounts for special cases like inserting before a PHI node.
410 /// The current strategy for inserting before PHIs is to duplicate the
411 /// instructions for each predecessor. However, while that's ok for G_TRUNC
412 /// on most targets since it generally requires no code, other targets/cases may
413 /// want to try harder to find a dominating block.
414 static void InsertInsnsWithoutSideEffectsBeforeUse(
417  MachineOperand &UseMO)>
418  Inserter) {
419  MachineInstr &UseMI = *UseMO.getParent();
420 
421  MachineBasicBlock *InsertBB = UseMI.getParent();
422 
423  // If the use is a PHI then we want the predecessor block instead.
424  if (UseMI.isPHI()) {
425  MachineOperand *PredBB = std::next(&UseMO);
426  InsertBB = PredBB->getMBB();
427  }
428 
429  // If the block is the same block as the def then we want to insert just after
430  // the def instead of at the start of the block.
431  if (InsertBB == DefMI.getParent()) {
433  Inserter(InsertBB, std::next(InsertPt), UseMO);
434  return;
435  }
436 
437  // Otherwise we want the start of the BB
438  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
439 }
440 } // end anonymous namespace
441 
443  PreferredTuple Preferred;
444  if (matchCombineExtendingLoads(MI, Preferred)) {
445  applyCombineExtendingLoads(MI, Preferred);
446  return true;
447  }
448  return false;
449 }
450 
452  PreferredTuple &Preferred) {
453  // We match the loads and follow the uses to the extend instead of matching
454  // the extends and following the def to the load. This is because the load
455  // must remain in the same position for correctness (unless we also add code
456  // to find a safe place to sink it) whereas the extend is freely movable.
457  // It also prevents us from duplicating the load for the volatile case or just
458  // for performance.
459  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
460  if (!LoadMI)
461  return false;
462 
463  Register LoadReg = LoadMI->getDstReg();
464 
465  LLT LoadValueTy = MRI.getType(LoadReg);
466  if (!LoadValueTy.isScalar())
467  return false;
468 
469  // Most architectures will legalize loads narrower than s8 into at least a
470  // 1-byte load, and MMOs can only describe memory accesses in multiples of bytes.
471  // If we try to perform extload combining on those, we can end up with
472  // %a(s8) = extload %ptr (load 1 byte from %ptr)
473  // ... which is an illegal extload instruction.
474  if (LoadValueTy.getSizeInBits() < 8)
475  return false;
476 
477  // For non power-of-2 types, they will very likely be legalized into multiple
478  // loads. Don't bother trying to match them into extending loads.
479  if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
480  return false;
481 
482  // Find the preferred type aside from the any-extends (unless it's the only
483  // one) and non-extending ops. We'll emit an extending load to that type
484  // and emit a variant of (extend (trunc X)) for the others according to the
485  // relative type sizes. At the same time, pick an extend to use based on the
486  // extend involved in the chosen type.
487  unsigned PreferredOpcode =
488  isa<GLoad>(&MI)
489  ? TargetOpcode::G_ANYEXT
490  : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
491  Preferred = {LLT(), PreferredOpcode, nullptr};
492  for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
493  if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
494  UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
495  (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
496  const auto &MMO = LoadMI->getMMO();
497  // For atomics, only form anyextending loads.
498  if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
499  continue;
500  // Check for legality.
501  if (LI) {
502  LegalityQuery::MemDesc MMDesc(MMO);
503  LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
504  LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
505  if (LI->getAction({LoadMI->getOpcode(), {UseTy, SrcTy}, {MMDesc}})
506  .Action != LegalizeActions::Legal)
507  continue;
508  }
509  Preferred = ChoosePreferredUse(Preferred,
510  MRI.getType(UseMI.getOperand(0).getReg()),
511  UseMI.getOpcode(), &UseMI);
512  }
513  }
514 
515  // There were no extends
516  if (!Preferred.MI)
517  return false;
518  // It should be impossible to choose an extend without selecting a different
519  // type since by definition the result of an extend is larger.
520  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
521 
522  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
523  return true;
524 }
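// For illustration (hypothetical registers): for
//   %v:_(s8) = G_LOAD %p(p0) :: (load (s8))
//   %e:_(s32) = G_SEXT %v(s8)
// the match records Preferred = {s32, G_SEXT, <the G_SEXT instruction>}, and
// the apply step below turns the load into an s32 G_SEXTLOAD.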
525 
526 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
527  PreferredTuple &Preferred) {
528  // Rewrite the load to the chosen extending load.
529  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
530 
531  // Inserter to insert a truncate back to the original type at a given point
532  // with some basic CSE to limit truncate duplication to one per BB.
534  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
535  MachineBasicBlock::iterator InsertBefore,
536  MachineOperand &UseMO) {
537  MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
538  if (PreviouslyEmitted) {
539  Observer.changingInstr(*UseMO.getParent());
540  UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
541  Observer.changedInstr(*UseMO.getParent());
542  return;
543  }
544 
545  Builder.setInsertPt(*InsertIntoBB, InsertBefore);
546  Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
547  MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
548  EmittedInsns[InsertIntoBB] = NewMI;
549  replaceRegOpWith(MRI, UseMO, NewDstReg);
550  };
551 
553  MI.setDesc(
554  Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
555  ? TargetOpcode::G_SEXTLOAD
556  : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
557  ? TargetOpcode::G_ZEXTLOAD
558  : TargetOpcode::G_LOAD));
559 
560  // Rewrite all the uses to fix up the types.
561  auto &LoadValue = MI.getOperand(0);
563  for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
564  Uses.push_back(&UseMO);
565 
566  for (auto *UseMO : Uses) {
567  MachineInstr *UseMI = UseMO->getParent();
568 
569  // If the extend is compatible with the preferred extend then we should fix
570  // up the type and extend so that it uses the preferred use.
571  if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
572  UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
573  Register UseDstReg = UseMI->getOperand(0).getReg();
574  MachineOperand &UseSrcMO = UseMI->getOperand(1);
575  const LLT UseDstTy = MRI.getType(UseDstReg);
576  if (UseDstReg != ChosenDstReg) {
577  if (Preferred.Ty == UseDstTy) {
578  // If the use has the same type as the preferred use, then merge
579  // the vregs and erase the extend. For example:
580  // %1:_(s8) = G_LOAD ...
581  // %2:_(s32) = G_SEXT %1(s8)
582  // %3:_(s32) = G_ANYEXT %1(s8)
583  // ... = ... %3(s32)
584  // rewrites to:
585  // %2:_(s32) = G_SEXTLOAD ...
586  // ... = ... %2(s32)
587  replaceRegWith(MRI, UseDstReg, ChosenDstReg);
588  Observer.erasingInstr(*UseMO->getParent());
589  UseMO->getParent()->eraseFromParent();
590  } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
591  // If the preferred size is smaller, then keep the extend but extend
592  // from the result of the extending load. For example:
593  // %1:_(s8) = G_LOAD ...
594  // %2:_(s32) = G_SEXT %1(s8)
595  // %3:_(s64) = G_ANYEXT %1(s8)
596  // ... = ... %3(s64)
597  // rewrites to:
598  // %2:_(s32) = G_SEXTLOAD ...
599  // %3:_(s64) = G_ANYEXT %2:_(s32)
600  // ... = ... %3(s64)
601  replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
602  } else {
603  // If the preferred size is larger, then insert a truncate. For
604  // example:
605  // %1:_(s8) = G_LOAD ...
606  // %2:_(s64) = G_SEXT %1(s8)
607  // %3:_(s32) = G_ZEXT %1(s8)
608  // ... = ... %3(s32)
609  // rewrites to:
610  // %2:_(s64) = G_SEXTLOAD ...
611  // %4:_(s8) = G_TRUNC %2:_(s64)
612  // %3:_(s32) = G_ZEXT %4:_(s8)
613  // ... = ... %3(s32)
614  InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
615  InsertTruncAt);
616  }
617  continue;
618  }
619  // The use is (one of) the uses of the preferred use we chose earlier.
620  // We're going to update the load to def this value later so just erase
621  // the old extend.
622  Observer.erasingInstr(*UseMO->getParent());
623  UseMO->getParent()->eraseFromParent();
624  continue;
625  }
626 
627  // The use isn't an extend. Truncate back to the type we originally loaded.
628  // This is free on many targets.
629  InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
630  }
631 
632  MI.getOperand(0).setReg(ChosenDstReg);
634 }
635 
637  BuildFnTy &MatchInfo) {
638  assert(MI.getOpcode() == TargetOpcode::G_AND);
639 
640  // If we have the following code:
641  // %mask = G_CONSTANT 255
642  // %ld = G_LOAD %ptr, (load s16)
643  // %and = G_AND %ld, %mask
644  //
645  // Try to fold it into
646  // %ld = G_ZEXTLOAD %ptr, (load s8)
647 
648  Register Dst = MI.getOperand(0).getReg();
649  if (MRI.getType(Dst).isVector())
650  return false;
651 
652  auto MaybeMask =
653  getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
654  if (!MaybeMask)
655  return false;
656 
657  APInt MaskVal = MaybeMask->Value;
658 
659  if (!MaskVal.isMask())
660  return false;
661 
662  Register SrcReg = MI.getOperand(1).getReg();
663  GAnyLoad *LoadMI = getOpcodeDef<GAnyLoad>(SrcReg, MRI);
664  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()) ||
665  !LoadMI->isSimple())
666  return false;
667 
668  Register LoadReg = LoadMI->getDstReg();
669  LLT LoadTy = MRI.getType(LoadReg);
670  Register PtrReg = LoadMI->getPointerReg();
671  uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
672  unsigned MaskSizeBits = MaskVal.countTrailingOnes();
673 
674  // The mask must not be larger than the in-memory type, as it might cover
675  // sign-extended bits.
676  if (MaskSizeBits > LoadSizeBits)
677  return false;
678 
679  // If the mask covers the whole destination register, there's nothing to
680  // extend
681  if (MaskSizeBits >= LoadTy.getSizeInBits())
682  return false;
683 
684  // Most targets cannot deal with loads narrower than 8 bits and need to
685  // re-legalize them to at least byte-sized loads. Avoid creating such loads here.
686  if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
687  return false;
688 
689  const MachineMemOperand &MMO = LoadMI->getMMO();
690  LegalityQuery::MemDesc MemDesc(MMO);
691  MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
693  {TargetOpcode::G_ZEXTLOAD, {LoadTy, MRI.getType(PtrReg)}, {MemDesc}}))
694  return false;
695 
696  MatchInfo = [=](MachineIRBuilder &B) {
697  B.setInstrAndDebugLoc(*LoadMI);
698  auto &MF = B.getMF();
699  auto PtrInfo = MMO.getPointerInfo();
700  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MaskSizeBits / 8);
701  B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
702  };
703  return true;
704 }
705 
707  const MachineInstr &UseMI) {
708  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
709  "shouldn't consider debug uses");
710  assert(DefMI.getParent() == UseMI.getParent());
711  if (&DefMI == &UseMI)
712  return true;
713  const MachineBasicBlock &MBB = *DefMI.getParent();
714  auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
715  return &MI == &DefMI || &MI == &UseMI;
716  });
717  if (DefOrUse == MBB.end())
718  llvm_unreachable("Block must contain both DefMI and UseMI!");
719  return &*DefOrUse == &DefMI;
720 }
721 
723  const MachineInstr &UseMI) {
724  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
725  "shouldn't consider debug uses");
726  if (MDT)
727  return MDT->dominates(&DefMI, &UseMI);
728  else if (DefMI.getParent() != UseMI.getParent())
729  return false;
730 
731  return isPredecessor(DefMI, UseMI);
732 }
733 
735  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
736  Register SrcReg = MI.getOperand(1).getReg();
737  Register LoadUser = SrcReg;
738 
739  if (MRI.getType(SrcReg).isVector())
740  return false;
741 
742  Register TruncSrc;
743  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
744  LoadUser = TruncSrc;
745 
746  uint64_t SizeInBits = MI.getOperand(2).getImm();
747  // If the source is a G_SEXTLOAD from the same bit width, then we don't
748  // need any extend at all, just a truncate.
749  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
750  // If truncating more than the original extended value, abort.
751  auto LoadSizeBits = LoadMI->getMemSizeInBits();
752  if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
753  return false;
754  if (LoadSizeBits == SizeInBits)
755  return true;
756  }
757  return false;
758 }
759 
761  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
763  Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
764  MI.eraseFromParent();
765 }
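// For illustration (hypothetical registers):
//   %ld:_(s32) = G_SEXTLOAD %ptr(p0) :: (load (s8))
//   %ext:_(s32) = G_SEXT_INREG %ld, 8
// is already fully sign extended, so the G_SEXT_INREG is replaced by
//   %ext:_(s32) = COPY %ld(s32)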
766 
768  MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
769  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
770 
771  // Only supports scalars for now.
772  if (MRI.getType(MI.getOperand(0).getReg()).isVector())
773  return false;
774 
775  Register SrcReg = MI.getOperand(1).getReg();
776  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
777  if (!LoadDef || !MRI.hasOneNonDBGUse(LoadDef->getOperand(0).getReg()) ||
778  !LoadDef->isSimple())
779  return false;
780 
781  // If the sign extend extends from a narrower width than the load's width,
782  // then we can narrow the load width when we combine to a G_SEXTLOAD.
783  // Avoid widening the load at all.
784  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(),
785  LoadDef->getMemSizeInBits());
786 
787  // Don't generate G_SEXTLOADs with a < 1 byte width.
788  if (NewSizeBits < 8)
789  return false;
790  // Don't bother creating a non-power-of-2 sextload; it will likely be broken up
791  // anyway for most targets.
792  if (!isPowerOf2_32(NewSizeBits))
793  return false;
794 
795  const MachineMemOperand &MMO = LoadDef->getMMO();
796  LegalityQuery::MemDesc MMDesc(MMO);
797  MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
798  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
799  {MRI.getType(LoadDef->getDstReg()),
800  MRI.getType(LoadDef->getPointerReg())},
801  {MMDesc}}))
802  return false;
803 
804  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
805  return true;
806 }
807 
809  MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
810  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
811  Register LoadReg;
812  unsigned ScalarSizeBits;
813  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
814  GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
815 
816  // If we have the following:
817  // %ld = G_LOAD %ptr, (load 2)
818  // %ext = G_SEXT_INREG %ld, 8
819  // ==>
820  // %ld = G_SEXTLOAD %ptr (load 1)
821 
822  auto &MMO = LoadDef->getMMO();
823  Builder.setInstrAndDebugLoc(*LoadDef);
824  auto &MF = Builder.getMF();
825  auto PtrInfo = MMO.getPointerInfo();
826  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
827  Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
828  LoadDef->getPointerReg(), *NewMMO);
829  MI.eraseFromParent();
830 }
831 
832 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
834  auto &MF = *MI.getParent()->getParent();
835  const auto &TLI = *MF.getSubtarget().getTargetLowering();
836 
837 #ifndef NDEBUG
838  unsigned Opcode = MI.getOpcode();
839  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
840  Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
841 #endif
842 
843  Base = MI.getOperand(1).getReg();
845  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
846  return false;
847 
848  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
849  // FIXME: The following use traversal needs a bail-out for pathological cases.
850  for (auto &Use : MRI.use_nodbg_instructions(Base)) {
851  if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
852  continue;
853 
854  Offset = Use.getOperand(2).getReg();
855  if (!ForceLegalIndexing &&
856  !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
857  LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "
858  << Use);
859  continue;
860  }
861 
862  // Make sure the offset calculation is before the potentially indexed op.
863  // FIXME: we really care about dependency here. The offset calculation might
864  // be movable.
865  MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
866  if (!OffsetDef || !dominates(*OffsetDef, MI)) {
867  LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "
868  << Use);
869  continue;
870  }
871 
872  // FIXME: check whether all uses of Base are load/store with foldable
873  // addressing modes. If so, using the normal addr-modes is better than
874  // forming an indexed one.
875 
876  bool MemOpDominatesAddrUses = true;
877  for (auto &PtrAddUse :
878  MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
879  if (!dominates(MI, PtrAddUse)) {
880  MemOpDominatesAddrUses = false;
881  break;
882  }
883  }
884 
885  if (!MemOpDominatesAddrUses) {
886  LLVM_DEBUG(
887  dbgs() << " Ignoring candidate as memop does not dominate uses: "
888  << Use);
889  continue;
890  }
891 
892  LLVM_DEBUG(dbgs() << " Found match: " << Use);
893  Addr = Use.getOperand(0).getReg();
894  return true;
895  }
896 
897  return false;
898 }
899 
900 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
902  auto &MF = *MI.getParent()->getParent();
903  const auto &TLI = *MF.getSubtarget().getTargetLowering();
904 
905 #ifndef NDEBUG
906  unsigned Opcode = MI.getOpcode();
907  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
908  Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
909 #endif
910 
911  Addr = MI.getOperand(1).getReg();
912  MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
913  if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
914  return false;
915 
916  Base = AddrDef->getOperand(1).getReg();
917  Offset = AddrDef->getOperand(2).getReg();
918 
919  LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
920 
921  if (!ForceLegalIndexing &&
922  !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
923  LLVM_DEBUG(dbgs() << " Skipping, not legal for target");
924  return false;
925  }
926 
928  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
929  LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");
930  return false;
931  }
932 
933  if (MI.getOpcode() == TargetOpcode::G_STORE) {
934  // Would require a copy.
935  if (Base == MI.getOperand(0).getReg()) {
936  LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");
937  return false;
938  }
939 
940  // We're expecting one use of Addr in MI, but it could also be the
941  // value stored, which isn't actually dominated by the instruction.
942  if (MI.getOperand(0).getReg() == Addr) {
943  LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");
944  return false;
945  }
946  }
947 
948  // FIXME: check whether all uses of the base pointer are constant PtrAdds.
949  // That might allow us to end base's liveness here by adjusting the constant.
950 
951  for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
952  if (!dominates(MI, UseMI)) {
953  LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
954  return false;
955  }
956  }
957 
958  return true;
959 }
960 
962  IndexedLoadStoreMatchInfo MatchInfo;
963  if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
964  applyCombineIndexedLoadStore(MI, MatchInfo);
965  return true;
966  }
967  return false;
968 }
969 
971  unsigned Opcode = MI.getOpcode();
972  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
973  Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
974  return false;
975 
976  // For now, no targets actually support these opcodes so don't waste time
977  // running these unless we're forced to for testing.
978  if (!ForceLegalIndexing)
979  return false;
980 
981  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
982  MatchInfo.Offset);
983  if (!MatchInfo.IsPre &&
984  !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
985  MatchInfo.Offset))
986  return false;
987 
988  return true;
989 }
990 
993  MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
994  MachineIRBuilder MIRBuilder(MI);
995  unsigned Opcode = MI.getOpcode();
996  bool IsStore = Opcode == TargetOpcode::G_STORE;
997  unsigned NewOpcode;
998  switch (Opcode) {
999  case TargetOpcode::G_LOAD:
1000  NewOpcode = TargetOpcode::G_INDEXED_LOAD;
1001  break;
1002  case TargetOpcode::G_SEXTLOAD:
1003  NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
1004  break;
1005  case TargetOpcode::G_ZEXTLOAD:
1006  NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
1007  break;
1008  case TargetOpcode::G_STORE:
1009  NewOpcode = TargetOpcode::G_INDEXED_STORE;
1010  break;
1011  default:
1012  llvm_unreachable("Unknown load/store opcode");
1013  }
1014 
1015  auto MIB = MIRBuilder.buildInstr(NewOpcode);
1016  if (IsStore) {
1017  MIB.addDef(MatchInfo.Addr);
1018  MIB.addUse(MI.getOperand(0).getReg());
1019  } else {
1020  MIB.addDef(MI.getOperand(0).getReg());
1021  MIB.addDef(MatchInfo.Addr);
1022  }
1023 
1024  MIB.addUse(MatchInfo.Base);
1025  MIB.addUse(MatchInfo.Offset);
1026  MIB.addImm(MatchInfo.IsPre);
1027  MI.eraseFromParent();
1028  AddrDef.eraseFromParent();
1029 
1030  LLVM_DEBUG(dbgs() << " Combined to indexed operation");
1031 }
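// For illustration of the post-indexed form built above (hypothetical
// registers, with IsPre == 0):
//   %addr:_(p0) = G_PTR_ADD %base, %offset
//   %val:_(s32) = G_LOAD %base(p0) :: (load (s32))
// becomes
//   %val:_(s32), %addr:_(p0) = G_INDEXED_LOAD %base, %offset, 0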
1032 
1034  MachineInstr *&OtherMI) {
1035  unsigned Opcode = MI.getOpcode();
1036  bool IsDiv, IsSigned;
1037 
1038  switch (Opcode) {
1039  default:
1040  llvm_unreachable("Unexpected opcode!");
1041  case TargetOpcode::G_SDIV:
1042  case TargetOpcode::G_UDIV: {
1043  IsDiv = true;
1044  IsSigned = Opcode == TargetOpcode::G_SDIV;
1045  break;
1046  }
1047  case TargetOpcode::G_SREM:
1048  case TargetOpcode::G_UREM: {
1049  IsDiv = false;
1050  IsSigned = Opcode == TargetOpcode::G_SREM;
1051  break;
1052  }
1053  }
1054 
1055  Register Src1 = MI.getOperand(1).getReg();
1056  unsigned DivOpcode, RemOpcode, DivremOpcode;
1057  if (IsSigned) {
1058  DivOpcode = TargetOpcode::G_SDIV;
1059  RemOpcode = TargetOpcode::G_SREM;
1060  DivremOpcode = TargetOpcode::G_SDIVREM;
1061  } else {
1062  DivOpcode = TargetOpcode::G_UDIV;
1063  RemOpcode = TargetOpcode::G_UREM;
1064  DivremOpcode = TargetOpcode::G_UDIVREM;
1065  }
1066 
1067  if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
1068  return false;
1069 
1070  // Combine:
1071  // %div:_ = G_[SU]DIV %src1:_, %src2:_
1072  // %rem:_ = G_[SU]REM %src1:_, %src2:_
1073  // into:
1074  // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1075 
1076  // Combine:
1077  // %rem:_ = G_[SU]REM %src1:_, %src2:_
1078  // %div:_ = G_[SU]DIV %src1:_, %src2:_
1079  // into:
1080  // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1081 
1082  for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
1083  if (MI.getParent() == UseMI.getParent() &&
1084  ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1085  (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
1086  matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2))) {
1087  OtherMI = &UseMI;
1088  return true;
1089  }
1090  }
1091 
1092  return false;
1093 }
1094 
1096  MachineInstr *&OtherMI) {
1097  unsigned Opcode = MI.getOpcode();
1098  assert(OtherMI && "OtherMI shouldn't be empty.");
1099 
1100  Register DestDivReg, DestRemReg;
1101  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1102  DestDivReg = MI.getOperand(0).getReg();
1103  DestRemReg = OtherMI->getOperand(0).getReg();
1104  } else {
1105  DestDivReg = OtherMI->getOperand(0).getReg();
1106  DestRemReg = MI.getOperand(0).getReg();
1107  }
1108 
1109  bool IsSigned =
1110  Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1111 
1112  // Check which instruction is first in the block so we don't break def-use
1113  // deps by "moving" the instruction incorrectly.
1114  if (dominates(MI, *OtherMI))
1116  else
1117  Builder.setInstrAndDebugLoc(*OtherMI);
1118 
1119  Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1120  : TargetOpcode::G_UDIVREM,
1121  {DestDivReg, DestRemReg},
1122  {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
1123  MI.eraseFromParent();
1124  OtherMI->eraseFromParent();
1125 }
1126 
1128  MachineInstr *&BrCond) {
1129  assert(MI.getOpcode() == TargetOpcode::G_BR);
1130 
1131  // Try to match the following:
1132  // bb1:
1133  // G_BRCOND %c1, %bb2
1134  // G_BR %bb3
1135  // bb2:
1136  // ...
1137  // bb3:
1138 
1139  // The above pattern does not have a fall-through to the successor bb2, always
1140  // resulting in a branch no matter which path is taken. Here we try to find
1141  // and replace that pattern with a conditional branch to bb3 and otherwise a
1142  // fall-through to bb2. This is generally better for branch predictors.
1143 
1144  MachineBasicBlock *MBB = MI.getParent();
1146  if (BrIt == MBB->begin())
1147  return false;
1148  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1149 
1150  BrCond = &*std::prev(BrIt);
1151  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1152  return false;
1153 
1154  // Check that the next block is the conditional branch target. Also make sure
1155  // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1156  MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1157  return BrCondTarget != MI.getOperand(0).getMBB() &&
1158  MBB->isLayoutSuccessor(BrCondTarget);
1159 }
1160 
1162  MachineInstr *&BrCond) {
1163  MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1164  Builder.setInstrAndDebugLoc(*BrCond);
1165  LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1166  // FIXME: Does int/fp matter for this? If so, we might need to restrict
1167  // this to i1 only since we might not know for sure what kind of
1168  // compare generated the condition value.
1169  auto True = Builder.buildConstant(
1170  Ty, getICmpTrueVal(getTargetLowering(), false, false));
1171  auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1172 
1173  auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1175  MI.getOperand(0).setMBB(FallthroughBB);
1177 
1178  // Change the conditional branch to use the inverted condition and
1179  // new target block.
1180  Observer.changingInstr(*BrCond);
1181  BrCond->getOperand(0).setReg(Xor.getReg(0));
1182  BrCond->getOperand(1).setMBB(BrTarget);
1183  Observer.changedInstr(*BrCond);
1184 }
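// For illustration (hypothetical blocks and registers), the matched pattern
//   bb1: G_BRCOND %c1, %bb2
//        G_BR %bb3
// is rewritten so that bb2 becomes the fall-through:
//   bb1: %true:_(s1) = G_CONSTANT i1 1
//        %inv:_(s1) = G_XOR %c1, %true
//        G_BRCOND %inv, %bb3
//        G_BR %bb2        ; now branches to the layout successor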
1185 
1187  if (Ty.isVector())
1189  Ty.getNumElements());
1190  return IntegerType::get(C, Ty.getSizeInBits());
1191 }
1192 
1194  MachineIRBuilder HelperBuilder(MI);
1195  GISelObserverWrapper DummyObserver;
1196  LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1197  return Helper.lowerMemcpyInline(MI) ==
1198  LegalizerHelper::LegalizeResult::Legalized;
1199 }
1200 
1202  MachineIRBuilder HelperBuilder(MI);
1203  GISelObserverWrapper DummyObserver;
1204  LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1205  return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1206  LegalizerHelper::LegalizeResult::Legalized;
1207 }
1208 
1209 static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
1210  const Register Op,
1211  const MachineRegisterInfo &MRI) {
1212  const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
1213  if (!MaybeCst)
1214  return None;
1215 
1216  APFloat V = MaybeCst->getValueAPF();
1217  switch (Opcode) {
1218  default:
1219  llvm_unreachable("Unexpected opcode!");
1220  case TargetOpcode::G_FNEG: {
1221  V.changeSign();
1222  return V;
1223  }
1224  case TargetOpcode::G_FABS: {
1225  V.clearSign();
1226  return V;
1227  }
1228  case TargetOpcode::G_FPTRUNC:
1229  break;
1230  case TargetOpcode::G_FSQRT: {
1231  bool Unused;
1233  V = APFloat(sqrt(V.convertToDouble()));
1234  break;
1235  }
1236  case TargetOpcode::G_FLOG2: {
1237  bool Unused;
1239  V = APFloat(log2(V.convertToDouble()));
1240  break;
1241  }
1242  }
1243  // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
1244  // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
1245  // and `G_FLOG2` reach here.
1246  bool Unused;
1248  return V;
1249 }
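// For illustration: with a G_FCONSTANT operand of 4.0, G_FNEG folds to -4.0,
// G_FABS of -4.0 folds to 4.0, and G_FSQRT of 4.0 folds to 2.0 (computed in
// double precision and converted back to the destination type).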
1250 
1252  Optional<APFloat> &Cst) {
1253  Register DstReg = MI.getOperand(0).getReg();
1254  Register SrcReg = MI.getOperand(1).getReg();
1255  LLT DstTy = MRI.getType(DstReg);
1256  Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1257  return Cst.hasValue();
1258 }
1259 
1261  Optional<APFloat> &Cst) {
1262  assert(Cst.hasValue() && "Optional is unexpectedly empty!");
1264  MachineFunction &MF = Builder.getMF();
1265  auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1266  Register DstReg = MI.getOperand(0).getReg();
1267  Builder.buildFConstant(DstReg, *FPVal);
1268  MI.eraseFromParent();
1269 }
1270 
1272  PtrAddChain &MatchInfo) {
1273  // We're trying to match the following pattern:
1274  // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1275  // %root = G_PTR_ADD %t1, G_CONSTANT imm2
1276  // -->
1277  // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1278 
1279  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1280  return false;
1281 
1282  Register Add2 = MI.getOperand(1).getReg();
1283  Register Imm1 = MI.getOperand(2).getReg();
1284  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1285  if (!MaybeImmVal)
1286  return false;
1287 
1288  MachineInstr *Add2Def = MRI.getVRegDef(Add2);
1289  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1290  return false;
1291 
1292  Register Base = Add2Def->getOperand(1).getReg();
1293  Register Imm2 = Add2Def->getOperand(2).getReg();
1294  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1295  if (!MaybeImm2Val)
1296  return false;
1297 
1298  // Check if the new combined immediate forms an illegal addressing mode.
1299  // Do not combine if it was legal before but would become illegal.
1300  // To do so, we need to find a load/store user of the pointer to get
1301  // the access type.
1302  Type *AccessTy = nullptr;
1303  auto &MF = *MI.getMF();
1304  for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1305  if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1306  AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1307  MF.getFunction().getContext());
1308  break;
1309  }
1310  }
1312  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1313  AMNew.BaseOffs = CombinedImm.getSExtValue();
1314  if (AccessTy) {
1315  AMNew.HasBaseReg = true;
1317  AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
1318  AMOld.HasBaseReg = true;
1319  unsigned AS = MRI.getType(Add2).getAddressSpace();
1320  const auto &TLI = *MF.getSubtarget().getTargetLowering();
1321  if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1322  !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1323  return false;
1324  }
1325 
1326  // Pass the combined immediate to the apply function.
1327  MatchInfo.Imm = AMNew.BaseOffs;
1328  MatchInfo.Base = Base;
1329  MatchInfo.Bank = getRegBank(Imm2);
1330  return true;
1331 }
1332 
1334  PtrAddChain &MatchInfo) {
1335  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1336  MachineIRBuilder MIB(MI);
1337  LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1338  auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1339  setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
1341  MI.getOperand(1).setReg(MatchInfo.Base);
1342  MI.getOperand(2).setReg(NewOffset.getReg(0));
1344 }
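// For illustration (hypothetical registers):
//   %t1:_(p0) = G_PTR_ADD %base, %c4(s64)    ; %c4 = G_CONSTANT i64 4
//   %root:_(p0) = G_PTR_ADD %t1, %c8(s64)    ; %c8 = G_CONSTANT i64 8
// is rewritten in place as
//   %root:_(p0) = G_PTR_ADD %base, %c12(s64) ; %c12 = G_CONSTANT i64 12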
1345 
1347  RegisterImmPair &MatchInfo) {
1348  // We're trying to match the following pattern with any of
1349  // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1350  // %t1 = SHIFT %base, G_CONSTANT imm1
1351  // %root = SHIFT %t1, G_CONSTANT imm2
1352  // -->
1353  // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1354 
1355  unsigned Opcode = MI.getOpcode();
1356  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1357  Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1358  Opcode == TargetOpcode::G_USHLSAT) &&
1359  "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1360 
1361  Register Shl2 = MI.getOperand(1).getReg();
1362  Register Imm1 = MI.getOperand(2).getReg();
1363  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1364  if (!MaybeImmVal)
1365  return false;
1366 
1367  MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1368  if (Shl2Def->getOpcode() != Opcode)
1369  return false;
1370 
1371  Register Base = Shl2Def->getOperand(1).getReg();
1372  Register Imm2 = Shl2Def->getOperand(2).getReg();
1373  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1374  if (!MaybeImm2Val)
1375  return false;
1376 
1377  // Pass the combined immediate to the apply function.
1378  MatchInfo.Imm =
1379  (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
1380  MatchInfo.Reg = Base;
1381 
1382  // There is no simple replacement for a saturating unsigned left shift that
1383  // exceeds the scalar size.
1384  if (Opcode == TargetOpcode::G_USHLSAT &&
1385  MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1386  return false;
1387 
1388  return true;
1389 }
1390 
1392  RegisterImmPair &MatchInfo) {
1393  unsigned Opcode = MI.getOpcode();
1394  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1395  Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1396  Opcode == TargetOpcode::G_USHLSAT) &&
1397  "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1398 
1400  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1401  unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1402  auto Imm = MatchInfo.Imm;
1403 
1404  if (Imm >= ScalarSizeInBits) {
1405  // Any logical shift that exceeds scalar size will produce zero.
1406  if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1407  Builder.buildConstant(MI.getOperand(0), 0);
1408  MI.eraseFromParent();
1409  return;
1410  }
1411  // Arithmetic shift and saturating signed left shift have no effect beyond
1412  // scalar size.
1413  Imm = ScalarSizeInBits - 1;
1414  }
1415 
1416  LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1417  Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1419  MI.getOperand(1).setReg(MatchInfo.Reg);
1420  MI.getOperand(2).setReg(NewImm);
1422 }
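// For illustration: on s8, folding
//   %t:_(s8) = G_LSHR %x, 6
//   %r:_(s8) = G_LSHR %t, 5
// gives a combined shift of 11 >= 8, so the whole chain is replaced by
//   %r:_(s8) = G_CONSTANT i8 0
// whereas an equivalent G_ASHR chain would instead be clamped to a shift of 7.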
1423 
1425  ShiftOfShiftedLogic &MatchInfo) {
1426  // We're trying to match the following pattern with any of
1427  // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1428  // with any of G_AND/G_OR/G_XOR logic instructions.
1429  // %t1 = SHIFT %X, G_CONSTANT C0
1430  // %t2 = LOGIC %t1, %Y
1431  // %root = SHIFT %t2, G_CONSTANT C1
1432  // -->
1433  // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1434  // %t4 = SHIFT %Y, G_CONSTANT C1
1435  // %root = LOGIC %t3, %t4
1436  unsigned ShiftOpcode = MI.getOpcode();
1437  assert((ShiftOpcode == TargetOpcode::G_SHL ||
1438  ShiftOpcode == TargetOpcode::G_ASHR ||
1439  ShiftOpcode == TargetOpcode::G_LSHR ||
1440  ShiftOpcode == TargetOpcode::G_USHLSAT ||
1441  ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1442  "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1443 
1444  // Match a one-use bitwise logic op.
1445  Register LogicDest = MI.getOperand(1).getReg();
1446  if (!MRI.hasOneNonDBGUse(LogicDest))
1447  return false;
1448 
1449  MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1450  unsigned LogicOpcode = LogicMI->getOpcode();
1451  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1452  LogicOpcode != TargetOpcode::G_XOR)
1453  return false;
1454 
1455  // Find a matching one-use shift by constant.
1456  const Register C1 = MI.getOperand(2).getReg();
1457  auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
1458  if (!MaybeImmVal)
1459  return false;
1460 
1461  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1462 
1463  auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1464  // Shift should match the previous one and have a single use.
1465  if (MI->getOpcode() != ShiftOpcode ||
1466  !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1467  return false;
1468 
1469  // Must be a constant.
1470  auto MaybeImmVal =
1471  getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1472  if (!MaybeImmVal)
1473  return false;
1474 
1475  ShiftVal = MaybeImmVal->Value.getSExtValue();
1476  return true;
1477  };
1478 
1479  // Logic ops are commutative, so check each operand for a match.
1480  Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1481  MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1482  Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1483  MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1484  uint64_t C0Val;
1485 
1486  if (matchFirstShift(LogicMIOp1, C0Val)) {
1487  MatchInfo.LogicNonShiftReg = LogicMIReg2;
1488  MatchInfo.Shift2 = LogicMIOp1;
1489  } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1490  MatchInfo.LogicNonShiftReg = LogicMIReg1;
1491  MatchInfo.Shift2 = LogicMIOp2;
1492  } else
1493  return false;
1494 
1495  MatchInfo.ValSum = C0Val + C1Val;
1496 
1497  // The fold is not valid if the sum of the shift values exceeds bitwidth.
1498  if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1499  return false;
1500 
1501  MatchInfo.Logic = LogicMI;
1502  return true;
1503 }
1504 
1506  ShiftOfShiftedLogic &MatchInfo) {
1507  unsigned Opcode = MI.getOpcode();
1508  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1509  Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1510  Opcode == TargetOpcode::G_SSHLSAT) &&
1511  "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1512 
1513  LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1514  LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1516 
1517  Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1518 
1519  Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1520  Register Shift1 =
1521  Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1522 
1523  Register Shift2Const = MI.getOperand(2).getReg();
1524  Register Shift2 = Builder
1525  .buildInstr(Opcode, {DestType},
1526  {MatchInfo.LogicNonShiftReg, Shift2Const})
1527  .getReg(0);
1528 
1529  Register Dest = MI.getOperand(0).getReg();
1530  Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1531 
1532  // These each had a single use, so it's safe to remove them.
1535 
1536  MI.eraseFromParent();
1537 }
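// For illustration (hypothetical registers):
//   %t1:_(s32) = G_SHL %x, %c2    ; %c2 = G_CONSTANT i32 2
//   %t2:_(s32) = G_AND %t1, %y
//   %r:_(s32)  = G_SHL %t2, %c3   ; %c3 = G_CONSTANT i32 3
// becomes
//   %s1:_(s32) = G_SHL %x, 5
//   %s2:_(s32) = G_SHL %y, %c3
//   %r:_(s32)  = G_AND %s1, %s2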
1538 
1540  unsigned &ShiftVal) {
1541  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1542  auto MaybeImmVal =
1543  getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1544  if (!MaybeImmVal)
1545  return false;
1546 
1547  ShiftVal = MaybeImmVal->Value.exactLogBase2();
1548  return (static_cast<int32_t>(ShiftVal) != -1);
1549 }
1550 
1552  unsigned &ShiftVal) {
1553  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1554  MachineIRBuilder MIB(MI);
1555  LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1556  auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1558  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1559  MI.getOperand(2).setReg(ShiftCst.getReg(0));
1561 }
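// For illustration (hypothetical registers):
//   %r:_(s32) = G_MUL %x, %c      ; %c = G_CONSTANT i32 16
// becomes (16 == 1 << 4)
//   %s:_(s32) = G_CONSTANT i32 4
//   %r:_(s32) = G_SHL %x, %s(s32)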
1562 
1563 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1565  RegisterImmPair &MatchData) {
1566  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1567 
1568  Register LHS = MI.getOperand(1).getReg();
1569 
1570  Register ExtSrc;
1571  if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1572  !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1573  !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1574  return false;
1575 
1576  // TODO: Should handle vector splat.
1577  Register RHS = MI.getOperand(2).getReg();
1578  auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI);
1579  if (!MaybeShiftAmtVal)
1580  return false;
1581 
1582  if (LI) {
1583  LLT SrcTy = MRI.getType(ExtSrc);
1584 
1585  // We only really care about the legality of the shifted value. We can
1586  // pick any type for the constant shift amount, so ask the target what to
1587  // use. Otherwise we would have to guess and hope it is reported as legal.
1588  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1589  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1590  return false;
1591  }
1592 
1593  int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
1594  MatchData.Reg = ExtSrc;
1595  MatchData.Imm = ShiftAmt;
1596 
1597  unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1598  return MinLeadingZeros >= ShiftAmt;
1599 }
1600 
1602  const RegisterImmPair &MatchData) {
1603  Register ExtSrcReg = MatchData.Reg;
1604  int64_t ShiftAmtVal = MatchData.Imm;
1605 
1606  LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1608  auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1609  auto NarrowShift =
1610  Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1611  Builder.buildZExt(MI.getOperand(0), NarrowShift);
1612  MI.eraseFromParent();
1613 }
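// For illustration (hypothetical registers): when at least two high bits of
// %x are known to be zero, so the shift cannot overflow the narrow type,
//   %e:_(s32) = G_ZEXT %x(s8)
//   %r:_(s32) = G_SHL %e, 2
// becomes
//   %n:_(s8) = G_SHL %x, 2
//   %r:_(s32) = G_ZEXT %n(s8)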
1614 
1616  Register &MatchInfo) {
1617  GMerge &Merge = cast<GMerge>(MI);
1618  SmallVector<Register, 16> MergedValues;
1619  for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1620  MergedValues.emplace_back(Merge.getSourceReg(I));
1621 
1622  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1623  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1624  return false;
1625 
1626  for (unsigned I = 0; I < MergedValues.size(); ++I)
1627  if (MergedValues[I] != Unmerge->getReg(I))
1628  return false;
1629 
1630  MatchInfo = Unmerge->getSourceReg();
1631  return true;
1632 }
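// For illustration (hypothetical registers):
//   %a:_(s16), %b:_(s16) = G_UNMERGE_VALUES %x(s32)
//   %y:_(s32) = G_MERGE_VALUES %a(s16), %b(s16)
// matches with MatchInfo = %x, i.e. the merge just reconstructs %x and %y can
// simply be replaced by it.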
1633 
1635  const MachineRegisterInfo &MRI) {
1636  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1637  ;
1638 
1639  return Reg;
1640 }
1641 
1644  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1645  "Expected an unmerge");
1646  auto &Unmerge = cast<GUnmerge>(MI);
1647  Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
1648 
1649  auto *SrcInstr = getOpcodeDef<GMergeLikeOp>(SrcReg, MRI);
1650  if (!SrcInstr)
1651  return false;
1652 
1653  // Check the source type of the merge.
1654  LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
1655  LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
1656  bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1657  if (SrcMergeTy != Dst0Ty && !SameSize)
1658  return false;
1659  // They are the same now (modulo a bitcast).
1660  // We can collect all the src registers.
1661  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
1662  Operands.push_back(SrcInstr->getSourceReg(Idx));
1663  return true;
1664 }
1665 
1668  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1669  "Expected an unmerge");
1670  assert((MI.getNumOperands() - 1 == Operands.size()) &&
1671  "Not enough operands to replace all defs");
1672  unsigned NumElems = MI.getNumOperands() - 1;
1673 
1674  LLT SrcTy = MRI.getType(Operands[0]);
1675  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1676  bool CanReuseInputDirectly = DstTy == SrcTy;
1678  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1679  Register DstReg = MI.getOperand(Idx).getReg();
1680  Register SrcReg = Operands[Idx];
1681  if (CanReuseInputDirectly)
1682  replaceRegWith(MRI, DstReg, SrcReg);
1683  else
1684  Builder.buildCast(DstReg, SrcReg);
1685  }
1686  MI.eraseFromParent();
1687 }
1688 
1689 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1690  SmallVectorImpl<APInt> &Csts) {
1691  unsigned SrcIdx = MI.getNumOperands() - 1;
1692  Register SrcReg = MI.getOperand(SrcIdx).getReg();
1693  MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1694  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1695  SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1696  return false;
1697  // Break down the big constant in smaller ones.
1698  const MachineOperand &CstVal = SrcInstr->getOperand(1);
1699  APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1700  ? CstVal.getCImm()->getValue()
1701  : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1702 
1703  LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1704  unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1705  // Unmerge a constant.
1706  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1707  Csts.emplace_back(Val.trunc(ShiftAmt));
1708  Val = Val.lshr(ShiftAmt);
1709  }
1710 
1711  return true;
1712 }
1713 
1714 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1715  SmallVectorImpl<APInt> &Csts) {
1716  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1717  "Expected an unmerge");
1718  assert((MI.getNumOperands() - 1 == Csts.size()) &&
1719  "Not enough operands to replace all defs");
1720  unsigned NumElems = MI.getNumOperands() - 1;
1722  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1723  Register DstReg = MI.getOperand(Idx).getReg();
1724  Builder.buildConstant(DstReg, Csts[Idx]);
1725  }
1726 
1727  MI.eraseFromParent();
1728 }
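// Illustrative sketch (invented register names): unmerging a constant becomes
// one constant per destination, low part first, e.g.
//   %c:_(s64) = G_CONSTANT i64 0x0000000100000002
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %c
// becomes
//   %lo:_(s32) = G_CONSTANT i32 2
//   %hi:_(s32) = G_CONSTANT i32 1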
1729 
1731  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1732  "Expected an unmerge");
1733  // Check that all the lanes are dead except the first one.
1734  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1735  if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1736  return false;
1737  }
1738  return true;
1739 }
1740 
1743  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1744  // Truncating a vector is going to truncate every single lane,
1745  // whereas we want the low bits of the full value.
1746  // Do the operation on a scalar instead.
1747  LLT SrcTy = MRI.getType(SrcReg);
1748  if (SrcTy.isVector())
1749  SrcReg =
1750  Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1751 
1752  Register Dst0Reg = MI.getOperand(0).getReg();
1753  LLT Dst0Ty = MRI.getType(Dst0Reg);
1754  if (Dst0Ty.isVector()) {
1755  auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
1756  Builder.buildCast(Dst0Reg, MIB);
1757  } else
1758  Builder.buildTrunc(Dst0Reg, SrcReg);
1759  MI.eraseFromParent();
1760 }
1761 
1763  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1764  "Expected an unmerge");
1765  Register Dst0Reg = MI.getOperand(0).getReg();
1766  LLT Dst0Ty = MRI.getType(Dst0Reg);
1767  // G_ZEXT on vector applies to each lane, so it will
1768  // affect all destinations. Therefore we won't be able
1769  // to simplify the unmerge to just the first definition.
1770  if (Dst0Ty.isVector())
1771  return false;
1772  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1773  LLT SrcTy = MRI.getType(SrcReg);
1774  if (SrcTy.isVector())
1775  return false;
1776 
1777  Register ZExtSrcReg;
1778  if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
1779  return false;
1780 
1781  // Finally we can replace the first definition with
1782  // a zext of the source if the definition is big enough to hold
1783  // all of ZExtSrc bits.
1784  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1785  return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
1786 }
1787 
1789  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1790  "Expected an unmerge");
1791 
1792  Register Dst0Reg = MI.getOperand(0).getReg();
1793 
1794  MachineInstr *ZExtInstr =
1795  MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
1796  assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
1797  "Expecting a G_ZEXT");
1798 
1799  Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
1800  LLT Dst0Ty = MRI.getType(Dst0Reg);
1801  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
1802 
1804 
1805  if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
1806  Builder.buildZExt(Dst0Reg, ZExtSrcReg);
1807  } else {
1808  assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
1809  "ZExt src doesn't fit in destination");
1810  replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
1811  }
1812 
1813  Register ZeroReg;
1814  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1815  if (!ZeroReg)
1816  ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
1817  replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
1818  }
1819  MI.eraseFromParent();
1820 }
1821 
1822 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
1823  unsigned TargetShiftSize,
1824  unsigned &ShiftVal) {
1825  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
1826  MI.getOpcode() == TargetOpcode::G_LSHR ||
1827  MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
1828 
1829  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
1830  if (Ty.isVector()) // TODO:
1831  return false;
1832 
1833  // Don't narrow further than the requested size.
1834  unsigned Size = Ty.getSizeInBits();
1835  if (Size <= TargetShiftSize)
1836  return false;
1837 
1838  auto MaybeImmVal =
1839  getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1840  if (!MaybeImmVal)
1841  return false;
1842 
1843  ShiftVal = MaybeImmVal->Value.getSExtValue();
1844  return ShiftVal >= Size / 2 && ShiftVal < Size;
1845 }
1846 
1847 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
1848  const unsigned &ShiftVal) {
1849  Register DstReg = MI.getOperand(0).getReg();
1850  Register SrcReg = MI.getOperand(1).getReg();
1851  LLT Ty = MRI.getType(SrcReg);
1852  unsigned Size = Ty.getSizeInBits();
1853  unsigned HalfSize = Size / 2;
1854  assert(ShiftVal >= HalfSize);
1855 
1856  LLT HalfTy = LLT::scalar(HalfSize);
1857 
1858  Builder.setInstr(MI);
1859  auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
1860  unsigned NarrowShiftAmt = ShiftVal - HalfSize;
1861 
1862  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
1863  Register Narrowed = Unmerge.getReg(1);
1864 
1865  // dst = G_LSHR s64:x, C for C >= 32
1866  // =>
1867  // lo, hi = G_UNMERGE_VALUES x
1868  // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
1869 
1870  if (NarrowShiftAmt != 0) {
1871  Narrowed = Builder.buildLShr(HalfTy, Narrowed,
1872  Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1873  }
1874 
1875  auto Zero = Builder.buildConstant(HalfTy, 0);
1876  Builder.buildMerge(DstReg, { Narrowed, Zero });
1877  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
1878  Register Narrowed = Unmerge.getReg(0);
1879  // dst = G_SHL s64:x, C for C >= 32
1880  // =>
1881  // lo, hi = G_UNMERGE_VALUES x
1882  // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
1883  if (NarrowShiftAmt != 0) {
1884  Narrowed = Builder.buildShl(HalfTy, Narrowed,
1885  Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
1886  }
1887 
1888  auto Zero = Builder.buildConstant(HalfTy, 0);
1889  Builder.buildMerge(DstReg, { Zero, Narrowed });
1890  } else {
1891  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
1892  auto Hi = Builder.buildAShr(
1893  HalfTy, Unmerge.getReg(1),
1894  Builder.buildConstant(HalfTy, HalfSize - 1));
1895 
1896  if (ShiftVal == HalfSize) {
1897  // (G_ASHR i64:x, 32) ->
1898  // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
1899  Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
1900  } else if (ShiftVal == Size - 1) {
1901  // Don't need a second shift.
1902  // (G_ASHR i64:x, 63) ->
1903  // %narrowed = (G_ASHR hi_32(x), 31)
1904  // G_MERGE_VALUES %narrowed, %narrowed
1905  Builder.buildMerge(DstReg, { Hi, Hi });
1906  } else {
1907  auto Lo = Builder.buildAShr(
1908  HalfTy, Unmerge.getReg(1),
1909  Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
1910 
1911  // (G_ASHR i64:x, C) ->, for C >= 32
1912  // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
1913  Builder.buildMerge(DstReg, { Lo, Hi });
1914  }
1915  }
1916 
1917  MI.eraseFromParent();
1918 }
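// Illustrative sketch (invented register names): for a 64-bit logical shift
// right by 40 (at least half the width), only the high half contributes, so
//   %d:_(s64) = G_LSHR %x:_(s64), %c    ; %c = G_CONSTANT i64 40
// becomes
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x
//   %sh:_(s32) = G_LSHR %hi, %c8        ; %c8 = G_CONSTANT i32 8
//   %zero:_(s32) = G_CONSTANT i32 0
//   %d:_(s64) = G_MERGE_VALUES %sh, %zero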
1919 
1920 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
1921  unsigned TargetShiftAmount) {
1922  unsigned ShiftAmt;
1923  if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
1924  applyCombineShiftToUnmerge(MI, ShiftAmt);
1925  return true;
1926  }
1927 
1928  return false;
1929 }
1930 
1932  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1933  Register DstReg = MI.getOperand(0).getReg();
1934  LLT DstTy = MRI.getType(DstReg);
1935  Register SrcReg = MI.getOperand(1).getReg();
1936  return mi_match(SrcReg, MRI,
1937  m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
1938 }
1939 
1941  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
1942  Register DstReg = MI.getOperand(0).getReg();
1943  Builder.setInstr(MI);
1944  Builder.buildCopy(DstReg, Reg);
1945  MI.eraseFromParent();
1946 }
1947 
1949  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1950  Register SrcReg = MI.getOperand(1).getReg();
1951  return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
1952 }
1953 
1955  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
1956  Register DstReg = MI.getOperand(0).getReg();
1957  Builder.setInstr(MI);
1958  Builder.buildZExtOrTrunc(DstReg, Reg);
1959  MI.eraseFromParent();
1960 }
1961 
1962 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
1963  MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1964  assert(MI.getOpcode() == TargetOpcode::G_ADD);
1965  Register LHS = MI.getOperand(1).getReg();
1966  Register RHS = MI.getOperand(2).getReg();
1967  LLT IntTy = MRI.getType(LHS);
1968 
1969  // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
1970  // instruction.
1971  PtrReg.second = false;
1972  for (Register SrcReg : {LHS, RHS}) {
1973  if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
1974  // Don't handle cases where the integer is implicitly converted to the
1975  // pointer width.
1976  LLT PtrTy = MRI.getType(PtrReg.first);
1977  if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
1978  return true;
1979  }
1980 
1981  PtrReg.second = true;
1982  }
1983 
1984  return false;
1985 }
1986 
1987 void CombinerHelper::applyCombineAddP2IToPtrAdd(
1988  MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
1989  Register Dst = MI.getOperand(0).getReg();
1990  Register LHS = MI.getOperand(1).getReg();
1991  Register RHS = MI.getOperand(2).getReg();
1992 
1993  const bool DoCommute = PtrReg.second;
1994  if (DoCommute)
1995  std::swap(LHS, RHS);
1996  LHS = PtrReg.first;
1997 
1998  LLT PtrTy = MRI.getType(LHS);
1999 
2001  auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2002  Builder.buildPtrToInt(Dst, PtrAdd);
2003  MI.eraseFromParent();
2004 }
2005 
2006 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2007  int64_t &NewCst) {
2008  auto &PtrAdd = cast<GPtrAdd>(MI);
2009  Register LHS = PtrAdd.getBaseReg();
2010  Register RHS = PtrAdd.getOffsetReg();
2012 
2013  if (auto RHSCst = getIConstantVRegSExtVal(RHS, MRI)) {
2014  int64_t Cst;
2015  if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2016  NewCst = Cst + *RHSCst;
2017  return true;
2018  }
2019  }
2020 
2021  return false;
2022 }
2023 
2024 void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2025  int64_t &NewCst) {
2026  auto &PtrAdd = cast<GPtrAdd>(MI);
2027  Register Dst = PtrAdd.getReg(0);
2028 
2030  Builder.buildConstant(Dst, NewCst);
2031  PtrAdd.eraseFromParent();
2032 }
2033 
2035  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2036  Register DstReg = MI.getOperand(0).getReg();
2037  Register SrcReg = MI.getOperand(1).getReg();
2038  LLT DstTy = MRI.getType(DstReg);
2039  return mi_match(SrcReg, MRI,
2040  m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2041 }
2042 
2044  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2045  Register DstReg = MI.getOperand(0).getReg();
2046  Register SrcReg = MI.getOperand(1).getReg();
2047  LLT DstTy = MRI.getType(DstReg);
2048  if (mi_match(SrcReg, MRI,
2049  m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2050  unsigned DstSize = DstTy.getScalarSizeInBits();
2051  unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2052  return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2053  }
2054  return false;
2055 }
2056 
2057 bool CombinerHelper::matchCombineExtOfExt(
2058  MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2059  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2060  MI.getOpcode() == TargetOpcode::G_SEXT ||
2061  MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2062  "Expected a G_[ASZ]EXT");
2063  Register SrcReg = MI.getOperand(1).getReg();
2064  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2065  // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2066  unsigned Opc = MI.getOpcode();
2067  unsigned SrcOpc = SrcMI->getOpcode();
2068  if (Opc == SrcOpc ||
2069  (Opc == TargetOpcode::G_ANYEXT &&
2070  (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2071  (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2072  MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2073  return true;
2074  }
2075  return false;
2076 }
2077 
2078 void CombinerHelper::applyCombineExtOfExt(
2079  MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2080  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2081  MI.getOpcode() == TargetOpcode::G_SEXT ||
2082  MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2083  "Expected a G_[ASZ]EXT");
2084 
2085  Register Reg = std::get<0>(MatchInfo);
2086  unsigned SrcExtOp = std::get<1>(MatchInfo);
2087 
2088  // Combine exts with the same opcode.
2089  if (MI.getOpcode() == SrcExtOp) {
2090  Observer.changingInstr(MI);
2091  MI.getOperand(1).setReg(Reg);
2092  Observer.changedInstr(MI);
2093  return;
2094  }
2095 
2096  // Combine:
2097  // - anyext([sz]ext x) to [sz]ext x
2098  // - sext(zext x) to zext x
2099  if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2100  (MI.getOpcode() == TargetOpcode::G_SEXT &&
2101  SrcExtOp == TargetOpcode::G_ZEXT)) {
2102  Register DstReg = MI.getOperand(0).getReg();
2104  Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2105  MI.eraseFromParent();
2106  }
2107 }
2108 
2110  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2111  Register DstReg = MI.getOperand(0).getReg();
2112  Register SrcReg = MI.getOperand(1).getReg();
2113  LLT DstTy = MRI.getType(DstReg);
2114 
2116  Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2117  MI.getFlags());
2118  MI.eraseFromParent();
2119 }
2120 
2122  assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2123  Register SrcReg = MI.getOperand(1).getReg();
2124  return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
2125 }
2126 
2128  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2129  Src = MI.getOperand(1).getReg();
2130  Register AbsSrc;
2131  return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2132 }
2133 
2134 bool CombinerHelper::matchCombineTruncOfExt(
2135  MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2136  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2137  Register SrcReg = MI.getOperand(1).getReg();
2138  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2139  unsigned SrcOpc = SrcMI->getOpcode();
2140  if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2141  SrcOpc == TargetOpcode::G_ZEXT) {
2142  MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2143  return true;
2144  }
2145  return false;
2146 }
2147 
2148 void CombinerHelper::applyCombineTruncOfExt(
2149  MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2150  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2151  Register SrcReg = MatchInfo.first;
2152  unsigned SrcExtOp = MatchInfo.second;
2153  Register DstReg = MI.getOperand(0).getReg();
2154  LLT SrcTy = MRI.getType(SrcReg);
2155  LLT DstTy = MRI.getType(DstReg);
2156  if (SrcTy == DstTy) {
2157  MI.eraseFromParent();
2158  replaceRegWith(MRI, DstReg, SrcReg);
2159  return;
2160  }
2162  if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2163  Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2164  else
2165  Builder.buildTrunc(DstReg, SrcReg);
2166  MI.eraseFromParent();
2167 }
2168 
2169 bool CombinerHelper::matchCombineTruncOfShl(
2170  MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2171  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2172  Register DstReg = MI.getOperand(0).getReg();
2173  Register SrcReg = MI.getOperand(1).getReg();
2174  LLT DstTy = MRI.getType(DstReg);
2175  Register ShiftSrc;
2176  Register ShiftAmt;
2177 
2178  if (MRI.hasOneNonDBGUse(SrcReg) &&
2179  mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
2180  isLegalOrBeforeLegalizer(
2181  {TargetOpcode::G_SHL,
2182  {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
2183  KnownBits Known = KB->getKnownBits(ShiftAmt);
2184  unsigned Size = DstTy.getSizeInBits();
2185  if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
2186  MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
2187  return true;
2188  }
2189  }
2190  return false;
2191 }
2192 
2193 void CombinerHelper::applyCombineTruncOfShl(
2194  MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2195  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2196  Register DstReg = MI.getOperand(0).getReg();
2197  Register SrcReg = MI.getOperand(1).getReg();
2198  LLT DstTy = MRI.getType(DstReg);
2199  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2200 
2201  Register ShiftSrc = MatchInfo.first;
2202  Register ShiftAmt = MatchInfo.second;
2204  auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
2205  Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags());
2206  MI.eraseFromParent();
2207 }
2208 
2210  return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2211  return MO.isReg() &&
2212  getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2213  });
2214 }
2215 
2217  return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2218  return !MO.isReg() ||
2219  getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2220  });
2221 }
2222 
2224  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2225  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2226  return all_of(Mask, [](int Elt) { return Elt < 0; });
2227 }
2228 
2230  assert(MI.getOpcode() == TargetOpcode::G_STORE);
2231  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2232  MRI);
2233 }
2234 
2236  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2237  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2238  MRI);
2239 }
2240 
2242  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2243  if (auto MaybeCstCmp =
2244  getIConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
2245  OpIdx = MaybeCstCmp->Value.isNullValue() ? 3 : 2;
2246  return true;
2247  }
2248  return false;
2249 }
2250 
2252  MI.eraseFromParent();
2253  return true;
2254 }
2255 
2256 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2257  const MachineOperand &MOP2) {
2258  if (!MOP1.isReg() || !MOP2.isReg())
2259  return false;
2260  auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
2261  if (!InstAndDef1)
2262  return false;
2263  auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
2264  if (!InstAndDef2)
2265  return false;
2266  MachineInstr *I1 = InstAndDef1->MI;
2267  MachineInstr *I2 = InstAndDef2->MI;
2268 
2269  // Handle a case like this:
2270  //
2271  // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2272  //
2273  // Even though %0 and %1 are produced by the same instruction they are not
2274  // the same values.
2275  if (I1 == I2)
2276  return MOP1.getReg() == MOP2.getReg();
2277 
2278  // If we have an instruction which loads or stores, we can't guarantee that
2279  // it is identical.
2280  //
2281  // For example, we may have
2282  //
2283  // %x1 = G_LOAD %addr (load N from @somewhere)
2284  // ...
2285  // call @foo
2286  // ...
2287  // %x2 = G_LOAD %addr (load N from @somewhere)
2288  // ...
2289  // %or = G_OR %x1, %x2
2290  //
2291  // It's possible that @foo will modify whatever lives at the address we're
2292  // loading from. To be safe, let's just assume that all loads and stores
2293  // are different (unless we have something which is guaranteed to not
2294  // change.)
2295  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
2296  return false;
2297 
2298  // Check for physical registers on the instructions first to avoid cases
2299  // like this:
2300  //
2301  // %a = COPY $physreg
2302  // ...
2303  // SOMETHING implicit-def $physreg
2304  // ...
2305  // %b = COPY $physreg
2306  //
2307  // These copies are not equivalent.
2308  if (any_of(I1->uses(), [](const MachineOperand &MO) {
2309  return MO.isReg() && MO.getReg().isPhysical();
2310  })) {
2311  // Check if we have a case like this:
2312  //
2313  // %a = COPY $physreg
2314  // %b = COPY %a
2315  //
2316  // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2317  // From that, we know that they must have the same value, since they must
2318  // have come from the same COPY.
2319  return I1->isIdenticalTo(*I2);
2320  }
2321 
2322  // We don't have any physical registers, so we don't necessarily need the
2323  // same vreg defs.
2324  //
2325  // On the off-chance that there's some target instruction feeding into the
2326  // instruction, let's use produceSameValue instead of isIdenticalTo.
2327  if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
2328  // Handle instructions with multiple defs that produce the same values. The
2329  // values are the same for operands with the same index.
2330  // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2331  // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2332  // I1 and I2 are different instructions but produce the same values,
2333  // so %1 and %6 are the same, while %1 and %7 are not the same value.
2334  return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2335  I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2336  }
2337  return false;
2338 }
2339 
2340 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2341  if (!MOP.isReg())
2342  return false;
2343  // MIPatternMatch doesn't let us look through G_ZEXT etc.
2344  auto ValAndVReg = getIConstantVRegValWithLookThrough(MOP.getReg(), MRI);
2345  return ValAndVReg && ValAndVReg->Value == C;
2346 }
2347 
2348 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2349  unsigned OpIdx) {
2350  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2351  Register OldReg = MI.getOperand(0).getReg();
2352  Register Replacement = MI.getOperand(OpIdx).getReg();
2353  assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2354  MI.eraseFromParent();
2355  replaceRegWith(MRI, OldReg, Replacement);
2356  return true;
2357 }
2358 
2359 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2360  Register Replacement) {
2361  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2362  Register OldReg = MI.getOperand(0).getReg();
2363  assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2364  MI.eraseFromParent();
2365  replaceRegWith(MRI, OldReg, Replacement);
2366  return true;
2367 }
2368 
2370  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2371  // Match (cond ? x : x)
2372  return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2373  canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2374  MRI);
2375 }
2376 
2378  return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2379  canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2380  MRI);
2381 }
2382 
2384  return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2385  canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2386  MRI);
2387 }
2388 
2390  MachineOperand &MO = MI.getOperand(OpIdx);
2391  return MO.isReg() &&
2392  getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2393 }
2394 
2395 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
2396  unsigned OpIdx) {
2397  MachineOperand &MO = MI.getOperand(OpIdx);
2398  return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2399 }
2400 
2402  assert(MI.getNumDefs() == 1 && "Expected only one def?");
2403  Builder.setInstr(MI);
2404  Builder.buildFConstant(MI.getOperand(0), C);
2405  MI.eraseFromParent();
2406  return true;
2407 }
2408 
2410  assert(MI.getNumDefs() == 1 && "Expected only one def?");
2411  Builder.setInstr(MI);
2412  Builder.buildConstant(MI.getOperand(0), C);
2413  MI.eraseFromParent();
2414  return true;
2415 }
2416 
2418  assert(MI.getNumDefs() == 1 && "Expected only one def?");
2419  Builder.setInstr(MI);
2420  Builder.buildConstant(MI.getOperand(0), C);
2421  MI.eraseFromParent();
2422  return true;
2423 }
2424 
2426  assert(MI.getNumDefs() == 1 && "Expected only one def?");
2427  Builder.setInstr(MI);
2428  Builder.buildUndef(MI.getOperand(0));
2429  MI.eraseFromParent();
2430  return true;
2431 }
2432 
2433 bool CombinerHelper::matchSimplifyAddToSub(
2434  MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2435  Register LHS = MI.getOperand(1).getReg();
2436  Register RHS = MI.getOperand(2).getReg();
2437  Register &NewLHS = std::get<0>(MatchInfo);
2438  Register &NewRHS = std::get<1>(MatchInfo);
2439 
2440  // Helper lambda to check for opportunities for
2441  // ((0-A) + B) -> B - A
2442  // (A + (0-B)) -> A - B
2443  auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2444  if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2445  return false;
2446  NewLHS = MaybeNewLHS;
2447  return true;
2448  };
2449 
2450  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2451 }
2452 
2453 bool CombinerHelper::matchCombineInsertVecElts(
2454  MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2455  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2456  "Invalid opcode");
2457  Register DstReg = MI.getOperand(0).getReg();
2458  LLT DstTy = MRI.getType(DstReg);
2459  assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2460  unsigned NumElts = DstTy.getNumElements();
2461  // If this MI is part of a sequence of insert_vec_elts, then
2462  // don't do the combine in the middle of the sequence.
2463  if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2464  TargetOpcode::G_INSERT_VECTOR_ELT)
2465  return false;
2466  MachineInstr *CurrInst = &MI;
2467  MachineInstr *TmpInst;
2468  int64_t IntImm;
2469  Register TmpReg;
2470  MatchInfo.resize(NumElts);
2471  while (mi_match(
2472  CurrInst->getOperand(0).getReg(), MRI,
2473  m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
2474  if (IntImm >= NumElts)
2475  return false;
2476  if (!MatchInfo[IntImm])
2477  MatchInfo[IntImm] = TmpReg;
2478  CurrInst = TmpInst;
2479  }
2480  // Variable index.
2481  if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2482  return false;
2483  if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2484  for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2485  if (!MatchInfo[I - 1].isValid())
2486  MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2487  }
2488  return true;
2489  }
2490  // If we didn't end in a G_IMPLICIT_DEF, bail out.
2491  return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2492 }
2493 
2494 void CombinerHelper::applyCombineInsertVecElts(
2495  MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2496  Builder.setInstr(MI);
2497  Register UndefReg;
2498  auto GetUndef = [&]() {
2499  if (UndefReg)
2500  return UndefReg;
2501  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2502  UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2503  return UndefReg;
2504  };
2505  for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2506  if (!MatchInfo[I])
2507  MatchInfo[I] = GetUndef();
2508  }
2509  Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2510  MI.eraseFromParent();
2511 }
2512 
2513 void CombinerHelper::applySimplifyAddToSub(
2514  MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2515  Builder.setInstr(MI);
2516  Register SubLHS, SubRHS;
2517  std::tie(SubLHS, SubRHS) = MatchInfo;
2518  Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2519  MI.eraseFromParent();
2520 }
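// Illustrative sketch (invented register names): an add of a negated value
// becomes a subtraction, e.g.
//   %neg:_(s32) = G_SUB %zero, %b       ; %zero = G_CONSTANT i32 0
//   %r:_(s32) = G_ADD %a, %neg
// becomes
//   %r:_(s32) = G_SUB %a, %b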
2521 
2522 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2523  MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2524  // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2525  //
2526  // Creates the new hand + logic instruction (but does not insert them.)
2527  //
2528  // On success, MatchInfo is populated with the new instructions. These are
2529  // inserted in applyHoistLogicOpWithSameOpcodeHands.
2530  unsigned LogicOpcode = MI.getOpcode();
2531  assert(LogicOpcode == TargetOpcode::G_AND ||
2532  LogicOpcode == TargetOpcode::G_OR ||
2533  LogicOpcode == TargetOpcode::G_XOR);
2534  MachineIRBuilder MIB(MI);
2535  Register Dst = MI.getOperand(0).getReg();
2536  Register LHSReg = MI.getOperand(1).getReg();
2537  Register RHSReg = MI.getOperand(2).getReg();
2538 
2539  // Don't recompute anything.
2540  if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2541  return false;
2542 
2543  // Make sure we have (hand x, ...), (hand y, ...)
2544  MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2545  MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2546  if (!LeftHandInst || !RightHandInst)
2547  return false;
2548  unsigned HandOpcode = LeftHandInst->getOpcode();
2549  if (HandOpcode != RightHandInst->getOpcode())
2550  return false;
2551  if (!LeftHandInst->getOperand(1).isReg() ||
2552  !RightHandInst->getOperand(1).isReg())
2553  return false;
2554 
2555  // Make sure the types match up, and if we're doing this post-legalization,
2556  // we end up with legal types.
2557  Register X = LeftHandInst->getOperand(1).getReg();
2558  Register Y = RightHandInst->getOperand(1).getReg();
2559  LLT XTy = MRI.getType(X);
2560  LLT YTy = MRI.getType(Y);
2561  if (XTy != YTy)
2562  return false;
2563  if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2564  return false;
2565 
2566  // Optional extra source register.
2567  Register ExtraHandOpSrcReg;
2568  switch (HandOpcode) {
2569  default:
2570  return false;
2571  case TargetOpcode::G_ANYEXT:
2572  case TargetOpcode::G_SEXT:
2573  case TargetOpcode::G_ZEXT: {
2574  // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2575  break;
2576  }
2577  case TargetOpcode::G_AND:
2578  case TargetOpcode::G_ASHR:
2579  case TargetOpcode::G_LSHR:
2580  case TargetOpcode::G_SHL: {
2581  // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2582  MachineOperand &ZOp = LeftHandInst->getOperand(2);
2583  if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2584  return false;
2585  ExtraHandOpSrcReg = ZOp.getReg();
2586  break;
2587  }
2588  }
2589 
2590  // Record the steps to build the new instructions.
2591  //
2592  // Steps to build (logic x, y)
2593  auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2594  OperandBuildSteps LogicBuildSteps = {
2595  [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2596  [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2597  [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2598  InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2599 
2600  // Steps to build hand (logic x, y), ...z
2601  OperandBuildSteps HandBuildSteps = {
2602  [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2603  [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2604  if (ExtraHandOpSrcReg.isValid())
2605  HandBuildSteps.push_back(
2606  [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2607  InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2608 
2609  MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2610  return true;
2611 }
2612 
2613 void CombinerHelper::applyBuildInstructionSteps(
2614  MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2615  assert(MatchInfo.InstrsToBuild.size() &&
2616  "Expected at least one instr to build?");
2617  Builder.setInstr(MI);
2618  for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2619  assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2620  assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2621  MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2622  for (auto &OperandFn : InstrToBuild.OperandFns)
2623  OperandFn(Instr);
2624  }
2625  MI.eraseFromParent();
2626 }
2627 
2628 bool CombinerHelper::matchAshrShlToSextInreg(
2629  MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2630  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2631  int64_t ShlCst, AshrCst;
2632  Register Src;
2633  // FIXME: detect splat constant vectors.
2634  if (!mi_match(MI.getOperand(0).getReg(), MRI,
2635  m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
2636  return false;
2637  if (ShlCst != AshrCst)
2638  return false;
2639  if (!isLegalOrBeforeLegalizer(
2640  {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2641  return false;
2642  MatchInfo = std::make_tuple(Src, ShlCst);
2643  return true;
2644 }
2645 
2646 void CombinerHelper::applyAshShlToSextInreg(
2647  MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2648  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2649  Register Src;
2650  int64_t ShiftAmt;
2651  std::tie(Src, ShiftAmt) = MatchInfo;
2652  unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2654  Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2655  MI.eraseFromParent();
2656 }
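// Illustrative sketch (invented register names): a shl/ashr pair by the same
// constant is a sign extension of the low bits, e.g. on s32 with C = 24:
//   %s:_(s32) = G_SHL %x, %c            ; %c = G_CONSTANT i32 24
//   %a:_(s32) = G_ASHR %s, %c
// becomes
//   %a:_(s32) = G_SEXT_INREG %x, 8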
2657 
2658 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
2659 bool CombinerHelper::matchOverlappingAnd(
2660  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
2661  assert(MI.getOpcode() == TargetOpcode::G_AND);
2662 
2663  Register Dst = MI.getOperand(0).getReg();
2664  LLT Ty = MRI.getType(Dst);
2665 
2666  Register R;
2667  int64_t C1;
2668  int64_t C2;
2669  if (!mi_match(
2670  Dst, MRI,
2671  m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
2672  return false;
2673 
2674  MatchInfo = [=](MachineIRBuilder &B) {
2675  if (C1 & C2) {
2676  B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
2677  return;
2678  }
2679  auto Zero = B.buildConstant(Ty, 0);
2680  replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
2681  };
2682  return true;
2683 }
2684 
2685 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
2686  Register &Replacement) {
2687  // Given
2688  //
2689  // %y:_(sN) = G_SOMETHING
2690  // %x:_(sN) = G_SOMETHING
2691  // %res:_(sN) = G_AND %x, %y
2692  //
2693  // Eliminate the G_AND when it is known that x & y == x or x & y == y.
2694  //
2695  // Patterns like this can appear as a result of legalization. E.g.
2696  //
2697  // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2698  // %one:_(s32) = G_CONSTANT i32 1
2699  // %and:_(s32) = G_AND %cmp, %one
2700  //
2701  // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2702  assert(MI.getOpcode() == TargetOpcode::G_AND);
2703  if (!KB)
2704  return false;
2705 
2706  Register AndDst = MI.getOperand(0).getReg();
2707  LLT DstTy = MRI.getType(AndDst);
2708 
2709  // FIXME: This should be removed once GISelKnownBits supports vectors.
2710  if (DstTy.isVector())
2711  return false;
2712 
2713  Register LHS = MI.getOperand(1).getReg();
2714  Register RHS = MI.getOperand(2).getReg();
2715  KnownBits LHSBits = KB->getKnownBits(LHS);
2716  KnownBits RHSBits = KB->getKnownBits(RHS);
2717 
2718  // Check that x & Mask == x.
2719  // x & 1 == x, always
2720  // x & 0 == x, only if x is also 0
2721  // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
2722  //
2723  // Check if we can replace AndDst with the LHS of the G_AND
2724  if (canReplaceReg(AndDst, LHS, MRI) &&
2725  (LHSBits.Zero | RHSBits.One).isAllOnes()) {
2726  Replacement = LHS;
2727  return true;
2728  }
2729 
2730  // Check if we can replace AndDst with the RHS of the G_AND
2731  if (canReplaceReg(AndDst, RHS, MRI) &&
2732  (LHSBits.One | RHSBits.Zero).isAllOnes()) {
2733  Replacement = RHS;
2734  return true;
2735  }
2736 
2737  return false;
2738 }
2739 
2740 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
2741  // Given
2742  //
2743  // %y:_(sN) = G_SOMETHING
2744  // %x:_(sN) = G_SOMETHING
2745  // %res:_(sN) = G_OR %x, %y
2746  //
2747  // Eliminate the G_OR when it is known that x | y == x or x | y == y.
2748  assert(MI.getOpcode() == TargetOpcode::G_OR);
2749  if (!KB)
2750  return false;
2751 
2752  Register OrDst = MI.getOperand(0).getReg();
2753  LLT DstTy = MRI.getType(OrDst);
2754 
2755  // FIXME: This should be removed once GISelKnownBits supports vectors.
2756  if (DstTy.isVector())
2757  return false;
2758 
2759  Register LHS = MI.getOperand(1).getReg();
2760  Register RHS = MI.getOperand(2).getReg();
2761  KnownBits LHSBits = KB->getKnownBits(LHS);
2762  KnownBits RHSBits = KB->getKnownBits(RHS);
2763 
2764  // Check that x | Mask == x.
2765  // x | 0 == x, always
2766  // x | 1 == x, only if x is also 1
2767  // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
2768  //
2769  // Check if we can replace OrDst with the LHS of the G_OR
2770  if (canReplaceReg(OrDst, LHS, MRI) &&
2771  (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
2772  Replacement = LHS;
2773  return true;
2774  }
2775 
2776  // Check if we can replace OrDst with the RHS of the G_OR
2777  if (canReplaceReg(OrDst, RHS, MRI) &&
2778  (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
2779  Replacement = RHS;
2780  return true;
2781  }
2782 
2783  return false;
2784 }
2785 
2787  // If the input is already sign extended, just drop the extension.
2788  Register Src = MI.getOperand(1).getReg();
2789  unsigned ExtBits = MI.getOperand(2).getImm();
2790  unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
2791  return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
2792 }
2793 
2794 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
2795  int64_t Cst, bool IsVector, bool IsFP) {
2796  // For i1, Cst will always be -1 regardless of boolean contents.
2797  return (ScalarSizeBits == 1 && Cst == -1) ||
2798  isConstTrueVal(TLI, Cst, IsVector, IsFP);
2799 }
2800 
2801 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
2802  SmallVectorImpl<Register> &RegsToNegate) {
2803  assert(MI.getOpcode() == TargetOpcode::G_XOR);
2804  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2805  const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
2806  Register XorSrc;
2807  Register CstReg;
2808  // We match xor(src, true) here.
2809  if (!mi_match(MI.getOperand(0).getReg(), MRI,
2810  m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
2811  return false;
2812 
2813  if (!MRI.hasOneNonDBGUse(XorSrc))
2814  return false;
2815 
2816  // Check that XorSrc is the root of a tree of comparisons combined with ANDs
2817  // and ORs. The suffix of RegsToNegate starting from index I is used as a work
2818  // list of tree nodes to visit.
2819  RegsToNegate.push_back(XorSrc);
2820  // Remember whether the comparisons are all integer or all floating point.
2821  bool IsInt = false;
2822  bool IsFP = false;
2823  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
2824  Register Reg = RegsToNegate[I];
2825  if (!MRI.hasOneNonDBGUse(Reg))
2826  return false;
2827  MachineInstr *Def = MRI.getVRegDef(Reg);
2828  switch (Def->getOpcode()) {
2829  default:
2830  // Don't match if the tree contains anything other than ANDs, ORs and
2831  // comparisons.
2832  return false;
2833  case TargetOpcode::G_ICMP:
2834  if (IsFP)
2835  return false;
2836  IsInt = true;
2837  // When we apply the combine we will invert the predicate.
2838  break;
2839  case TargetOpcode::G_FCMP:
2840  if (IsInt)
2841  return false;
2842  IsFP = true;
2843  // When we apply the combine we will invert the predicate.
2844  break;
2845  case TargetOpcode::G_AND:
2846  case TargetOpcode::G_OR:
2847  // Implement De Morgan's laws:
2848  // ~(x & y) -> ~x | ~y
2849  // ~(x | y) -> ~x & ~y
2850  // When we apply the combine we will change the opcode and recursively
2851  // negate the operands.
2852  RegsToNegate.push_back(Def->getOperand(1).getReg());
2853  RegsToNegate.push_back(Def->getOperand(2).getReg());
2854  break;
2855  }
2856  }
2857 
2858  // Now we know whether the comparisons are integer or floating point, check
2859  // the constant in the xor.
2860  int64_t Cst;
2861  if (Ty.isVector()) {
2862  MachineInstr *CstDef = MRI.getVRegDef(CstReg);
2863  auto MaybeCst = getBuildVectorConstantSplat(*CstDef, MRI);
2864  if (!MaybeCst)
2865  return false;
2866  if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
2867  return false;
2868  } else {
2869  if (!mi_match(CstReg, MRI, m_ICst(Cst)))
2870  return false;
2871  if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
2872  return false;
2873  }
2874 
2875  return true;
2876 }
2877 
2878 void CombinerHelper::applyNotCmp(MachineInstr &MI,
2879  SmallVectorImpl<Register> &RegsToNegate) {
2880  for (Register Reg : RegsToNegate) {
2881  MachineInstr *Def = MRI.getVRegDef(Reg);
2882  Observer.changingInstr(*Def);
2883  // For each comparison, invert the opcode. For each AND and OR, change the
2884  // opcode.
2885  switch (Def->getOpcode()) {
2886  default:
2887  llvm_unreachable("Unexpected opcode");
2888  case TargetOpcode::G_ICMP:
2889  case TargetOpcode::G_FCMP: {
2890  MachineOperand &PredOp = Def->getOperand(1);
2891  CmpInst::Predicate NewP = CmpInst::getInversePredicate(
2892  (CmpInst::Predicate)PredOp.getPredicate());
2893  PredOp.setPredicate(NewP);
2894  break;
2895  }
2896  case TargetOpcode::G_AND:
2897  Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
2898  break;
2899  case TargetOpcode::G_OR:
2900  Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
2901  break;
2902  }
2903  Observer.changedInstr(*Def);
2904  }
2905 
2906  replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
2907  MI.eraseFromParent();
2908 }
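// Illustrative sketch (invented register names): xor-ing a comparison with
// "true" simply inverts the predicate, e.g.
//   %c:_(s1) = G_ICMP intpred(eq), %x:_(s32), %y:_(s32)
//   %t:_(s1) = G_CONSTANT i1 true
//   %n:_(s1) = G_XOR %c, %t
// ends up as
//   %n:_(s1) = G_ICMP intpred(ne), %x:_(s32), %y:_(s32)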
2909 
2910 bool CombinerHelper::matchXorOfAndWithSameReg(
2911  MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2912  // Match (xor (and x, y), y) (or any of its commuted cases)
2913  assert(MI.getOpcode() == TargetOpcode::G_XOR);
2914  Register &X = MatchInfo.first;
2915  Register &Y = MatchInfo.second;
2916  Register AndReg = MI.getOperand(1).getReg();
2917  Register SharedReg = MI.getOperand(2).getReg();
2918 
2919  // Find a G_AND on either side of the G_XOR.
2920  // Look for one of
2921  //
2922  // (xor (and x, y), SharedReg)
2923  // (xor SharedReg, (and x, y))
2924  if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
2925  std::swap(AndReg, SharedReg);
2926  if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
2927  return false;
2928  }
2929 
2930  // Only do this if we'll eliminate the G_AND.
2931  if (!MRI.hasOneNonDBGUse(AndReg))
2932  return false;
2933 
2934  // We can combine if SharedReg is the same as either the LHS or RHS of the
2935  // G_AND.
2936  if (Y != SharedReg)
2937  std::swap(X, Y);
2938  return Y == SharedReg;
2939 }
2940 
2941 void CombinerHelper::applyXorOfAndWithSameReg(
2942  MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2943  // Fold (xor (and x, y), y) -> (and (not x), y)
2945  Register X, Y;
2946  std::tie(X, Y) = MatchInfo;
2947  auto Not = Builder.buildNot(MRI.getType(X), X);
2949  MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
2950  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
2951  MI.getOperand(2).setReg(Y);
2953 }
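// Illustrative sketch (invented register names): with %y shared between the
// G_AND and the G_XOR, the combine above rewrites
//   %and:_(s32) = G_AND %x, %y
//   %r:_(s32) = G_XOR %and, %y
// into
//   %allones:_(s32) = G_CONSTANT i32 -1
//   %not:_(s32) = G_XOR %x, %allones
//   %r:_(s32) = G_AND %not, %y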
2954 
2956  auto &PtrAdd = cast<GPtrAdd>(MI);
2957  Register DstReg = PtrAdd.getReg(0);
2958  LLT Ty = MRI.getType(DstReg);
2959  const DataLayout &DL = Builder.getMF().getDataLayout();
2960 
2961  if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
2962  return false;
2963 
2964  if (Ty.isPointer()) {
2965  auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
2966  return ConstVal && *ConstVal == 0;
2967  }
2968 
2969  assert(Ty.isVector() && "Expecting a vector type");
2970  const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
2971  return isBuildVectorAllZeros(*VecMI, MRI);
2972 }
2973 
2975  auto &PtrAdd = cast<GPtrAdd>(MI);
2976  Builder.setInstrAndDebugLoc(PtrAdd);
2977  Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
2978  PtrAdd.eraseFromParent();
2979 }
2980 
2981 /// The second source operand is known to be a power of 2.
2983  Register DstReg = MI.getOperand(0).getReg();
2984  Register Src0 = MI.getOperand(1).getReg();
2985  Register Pow2Src1 = MI.getOperand(2).getReg();
2986  LLT Ty = MRI.getType(DstReg);
2988 
2989  // Fold (urem x, pow2) -> (and x, pow2-1)
2990  auto NegOne = Builder.buildConstant(Ty, -1);
2991  auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
2992  Builder.buildAnd(DstReg, Src0, Add);
2993  MI.eraseFromParent();
2994 }
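// Illustrative sketch (invented register names): when the divisor is known to
// be a power of two, the remainder is a mask of the low bits, e.g.
//   %r:_(s32) = G_UREM %x, %pow2
// becomes
//   %m1:_(s32) = G_CONSTANT i32 -1
//   %mask:_(s32) = G_ADD %pow2, %m1
//   %r:_(s32) = G_AND %x, %mask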
2995 
2996 Optional<SmallVector<Register, 8>>
2997 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
2998  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
2999  // We want to detect if Root is part of a tree which represents a bunch
3000  // of loads being merged into a larger load. We'll try to recognize patterns
3001  // like, for example:
3002  //
3003  // Reg Reg
3004  // \ /
3005  // OR_1 Reg
3006  // \ /
3007  // OR_2
3008  // \ Reg
3009  // .. /
3010  // Root
3011  //
3012  // Reg Reg Reg Reg
3013  // \ / \ /
3014  // OR_1 OR_2
3015  // \ /
3016  // \ /
3017  // ...
3018  // Root
3019  //
3020  // Each "Reg" may have been produced by a load + some arithmetic. This
3021  // function will save each of them.
3022  SmallVector<Register, 8> RegsToVisit;
3023  SmallVector<const MachineInstr *, 8> Ors = {Root};
3024 
3025  // In the "worst" case, we're dealing with a load for each byte. So, there
3026  // are at most #bytes - 1 ORs.
3027  const unsigned MaxIter =
3028  MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3029  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3030  if (Ors.empty())
3031  break;
3032  const MachineInstr *Curr = Ors.pop_back_val();
3033  Register OrLHS = Curr->getOperand(1).getReg();
3034  Register OrRHS = Curr->getOperand(2).getReg();
3035 
3036  // In the combine, we want to eliminate the entire tree.
3037  if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3038  return None;
3039 
3040  // If it's a G_OR, save it and continue to walk. If it's not, then it's
3041  // something that may be a load + arithmetic.
3042  if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3043  Ors.push_back(Or);
3044  else
3045  RegsToVisit.push_back(OrLHS);
3046  if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3047  Ors.push_back(Or);
3048  else
3049  RegsToVisit.push_back(OrRHS);
3050  }
3051 
3052  // We're going to try and merge each register into a wider power-of-2 type,
3053  // so we ought to have an even number of registers.
3054  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3055  return None;
3056  return RegsToVisit;
3057 }
3058 
3059 /// Helper function for findLoadOffsetsForLoadOrCombine.
3060 ///
3061 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3062 /// and then moving that value into a specific byte offset.
3063 ///
3064 /// e.g. x[i] << 24
3065 ///
3066 /// \returns The load instruction and the byte offset it is moved into.
3067 static Optional<std::pair<GZExtLoad *, int64_t>>
3068 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3069  const MachineRegisterInfo &MRI) {
3071  "Expected Reg to only have one non-debug use?");
3072  Register MaybeLoad;
3073  int64_t Shift;
3074  if (!mi_match(Reg, MRI,
3075  m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3076  Shift = 0;
3077  MaybeLoad = Reg;
3078  }
3079 
3080  if (Shift % MemSizeInBits != 0)
3081  return None;
3082 
3083  // TODO: Handle other types of loads.
3084  auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3085  if (!Load)
3086  return None;
3087 
3088  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3089  return None;
3090 
3091  return std::make_pair(Load, Shift / MemSizeInBits);
3092 }
3093 
3094 Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3095 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3096  SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3097  const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3098 
3099  // Each load found for the pattern. There should be one for each RegsToVisit.
3100  SmallSetVector<GZExtLoad *, 8> Loads;
3101 
3102  // The lowest index used in any load. (The lowest "i" for each x[i].)
3103  int64_t LowestIdx = INT64_MAX;
3104 
3105  // The load which uses the lowest index.
3106  GZExtLoad *LowestIdxLoad = nullptr;
3107 
3108  // Keeps track of the load indices we see. We shouldn't see any indices twice.
3109  SmallSet<int64_t, 8> SeenIdx;
3110 
3111  // Ensure each load is in the same MBB.
3112  // TODO: Support multiple MachineBasicBlocks.
3113  MachineBasicBlock *MBB = nullptr;
3114  const MachineMemOperand *MMO = nullptr;
3115 
3116  // Earliest instruction-order load in the pattern.
3117  GZExtLoad *EarliestLoad = nullptr;
3118 
3119  // Latest instruction-order load in the pattern.
3120  GZExtLoad *LatestLoad = nullptr;
3121 
3122  // Base pointer which every load should share.
3123  Register BasePtr;
3124 
3125  // We want to find a load for each register. Each load should have some
3126  // appropriate bit twiddling arithmetic. During this loop, we will also keep
3127  // track of the load which uses the lowest index. Later, we will check if we
3128  // can use its pointer in the final, combined load.
3129  for (auto Reg : RegsToVisit) {
3130  // Find the load, and find the position that it will end up in (e.g. a
3131  // shifted) value.
3132  auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3133  if (!LoadAndPos)
3134  return None;
3135  GZExtLoad *Load;
3136  int64_t DstPos;
3137  std::tie(Load, DstPos) = *LoadAndPos;
3138 
3139  // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3140  // it is difficult to check for stores/calls/etc between loads.
3141  MachineBasicBlock *LoadMBB = Load->getParent();
3142  if (!MBB)
3143  MBB = LoadMBB;
3144  if (LoadMBB != MBB)
3145  return None;
3146 
3147  // Make sure that the MachineMemOperands of every seen load are compatible.
3148  auto &LoadMMO = Load->getMMO();
3149  if (!MMO)
3150  MMO = &LoadMMO;
3151  if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3152  return None;
3153 
3154  // Find out what the base pointer and index for the load is.
3155  Register LoadPtr;
3156  int64_t Idx;
3157  if (!mi_match(Load->getOperand(1).getReg(), MRI,
3158  m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3159  LoadPtr = Load->getOperand(1).getReg();
3160  Idx = 0;
3161  }
3162 
3163  // Don't combine things like a[i], a[i] -> a bigger load.
3164  if (!SeenIdx.insert(Idx).second)
3165  return None;
3166 
3167  // Every load must share the same base pointer; don't combine things like:
3168  //
3169  // a[i], b[i + 1] -> a bigger load.
3170  if (!BasePtr.isValid())
3171  BasePtr = LoadPtr;
3172  if (BasePtr != LoadPtr)
3173  return None;
3174 
3175  if (Idx < LowestIdx) {
3176  LowestIdx = Idx;
3177  LowestIdxLoad = Load;
3178  }
3179 
3180  // Keep track of the byte offset that this load ends up at. If we have seen
3181  // the byte offset, then stop here. We do not want to combine:
3182  //
3183  // a[i] << 16, a[i + k] << 16 -> a bigger load.
3184  if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3185  return None;
3186  Loads.insert(Load);
3187 
3188  // Keep track of the position of the earliest/latest loads in the pattern.
3189  // We will check that there are no load fold barriers between them later
3190  // on.
3191  //
3192  // FIXME: Is there a better way to check for load fold barriers?
3193  if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3194  EarliestLoad = Load;
3195  if (!LatestLoad || dominates(*LatestLoad, *Load))
3196  LatestLoad = Load;
3197  }
3198 
3199  // We found a load for each register. Let's check if each load satisfies the
3200  // pattern.
3201  assert(Loads.size() == RegsToVisit.size() &&
3202  "Expected to find a load for each register?");
3203  assert(EarliestLoad != LatestLoad && EarliestLoad &&
3204  LatestLoad && "Expected at least two loads?");
3205 
3206  // Check if there are any stores, calls, etc. between any of the loads. If
3207  // there are, then we can't safely perform the combine.
3208  //
3209  // MaxIter is chosen based off the (worst case) number of iterations it
3210  // typically takes to succeed in the LLVM test suite plus some padding.
3211  //
3212  // FIXME: Is there a better way to check for load fold barriers?
3213  const unsigned MaxIter = 20;
3214  unsigned Iter = 0;
3215  for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3216  LatestLoad->getIterator())) {
3217  if (Loads.count(&MI))
3218  continue;
3219  if (MI.isLoadFoldBarrier())
3220  return None;
3221  if (Iter++ == MaxIter)
3222  return None;
3223  }
3224 
3225  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3226 }
3227 
3228 bool CombinerHelper::matchLoadOrCombine(
3229  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3230  assert(MI.getOpcode() == TargetOpcode::G_OR);
3231  MachineFunction &MF = *MI.getMF();
3232  // Assuming a little-endian target, transform:
3233  // s8 *a = ...
3234  // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3235  // =>
3236  // s32 val = *((i32)a)
3237  //
3238  // s8 *a = ...
3239  // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3240  // =>
3241  // s32 val = BSWAP(*((s32)a))
3242  Register Dst = MI.getOperand(0).getReg();
3243  LLT Ty = MRI.getType(Dst);
3244  if (Ty.isVector())
3245  return false;
3246 
3247  // We need to combine at least two loads into this type. Since the smallest
3248  // possible load is into a byte, we need at least a 16-bit wide type.
3249  const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3250  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3251  return false;
3252 
3253  // Match a collection of non-OR instructions in the pattern.
3254  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3255  if (!RegsToVisit)
3256  return false;
3257 
3258  // We have a collection of non-OR instructions. Figure out how wide each of
3259  // the small loads should be based off of the number of potential loads we
3260  // found.
3261  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3262  if (NarrowMemSizeInBits % 8 != 0)
3263  return false;
3264 
3265  // Check if each register feeding into each OR is a load from the same
3266  // base pointer + some arithmetic.
3267  //
3268  // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3269  //
3270  // Also verify that each of these ends up putting a[i] into the same memory
3271  // offset as a load into a wide type would.
3272  SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3273  GZExtLoad *LowestIdxLoad, *LatestLoad;
3274  int64_t LowestIdx;
3275  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3276  MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3277  if (!MaybeLoadInfo)
3278  return false;
3279  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3280 
3281  // We have a bunch of loads being OR'd together. Using the addresses + offsets
3282  // we found before, check if this corresponds to a big or little endian byte
3283  // pattern. If it does, then we can represent it using a load + possibly a
3284  // BSWAP.
3285  bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3286  Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3287  if (!IsBigEndian.hasValue())
3288  return false;
3289  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3290  if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3291  return false;
3292 
3293  // Make sure that the load from the lowest index produces offset 0 in the
3294  // final value.
3295  //
3296  // This ensures that we won't combine something like this:
3297  //
3298  // load x[i] -> byte 2
3299  // load x[i+1] -> byte 0 ---> wide_load x[i]
3300  // load x[i+2] -> byte 1
3301  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3302  const unsigned ZeroByteOffset =
3303  *IsBigEndian
3304  ? bigEndianByteAt(NumLoadsInTy, 0)
3305  : littleEndianByteAt(NumLoadsInTy, 0);
3306  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3307  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3308  ZeroOffsetIdx->second != LowestIdx)
3309  return false;
3310 
3311  // We will reuse the pointer from the load which ends up at byte offset 0. It
3312  // may not use index 0.
3313  Register Ptr = LowestIdxLoad->getPointerReg();
3314  const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3315  LegalityQuery::MemDesc MMDesc(MMO);
3316  MMDesc.MemoryTy = Ty;
3317  if (!isLegalOrBeforeLegalizer(
3318  {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3319  return false;
3320  auto PtrInfo = MMO.getPointerInfo();
3321  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3322 
3323  // Load must be allowed and fast on the target.
3324  LLVMContext &C = MF.getFunction().getContext();
3325  auto &DL = MF.getDataLayout();
3326  bool Fast = false;
3327  if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3328  !Fast)
3329  return false;
3330 
3331  MatchInfo = [=](MachineIRBuilder &MIB) {
3332  MIB.setInstrAndDebugLoc(*LatestLoad);
3333  Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3334  MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3335  if (NeedsBSwap)
3336  MIB.buildBSwap(Dst, LoadDst);
3337  };
3338  return true;
3339 }
3340 
3341 /// Check if the store \p Store is a truncstore that can be merged. That is,
3342 /// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
3343 /// Register then it does not need to match and SrcVal is set to the source
3344 /// value found.
3345 /// On match, returns the start byte offset of the \p SrcVal that is being
3346 /// stored.
3347 static Optional<int64_t>
3348 getTruncStoreByteOffset(GStore &Store, Register &SrcVal, MachineRegisterInfo &MRI) {
3349  Register TruncVal;
3350  if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
3351  return None;
3352 
3353  // The shift amount must be a constant multiple of the narrow type's width.
3354  // It is translated to the offset into the wide source value "y".
3355  //
3356  // x = G_LSHR y, ShiftAmtC
3357  // s8 z = G_TRUNC x
3358  // store z, ...
3359  Register FoundSrcVal;
3360  int64_t ShiftAmt;
3361  if (!mi_match(TruncVal, MRI,
3362  m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
3363  m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
3364  if (!SrcVal.isValid() || TruncVal == SrcVal) {
3365  if (!SrcVal.isValid())
3366  SrcVal = TruncVal;
3367  return 0; // If it's the lowest index store.
3368  }
3369  return None;
3370  }
3371 
3372  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
3373  if (ShiftAmt % NarrowBits != 0)
3374  return None;
3375  const unsigned Offset = ShiftAmt / NarrowBits;
3376 
3377  if (SrcVal.isValid() && FoundSrcVal != SrcVal)
3378  return None;
3379 
3380  if (!SrcVal.isValid())
3381  SrcVal = FoundSrcVal;
3382  else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
3383  return None;
3384  return Offset;
3385 }
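// Worked example (illustrative): for an s8 truncating store of
//   %x:_(s32) = G_LSHR %y, 16
//   %z:_(s8)  = G_TRUNC %x
//   G_STORE %z, %ptr
// NarrowBits is 8 and ShiftAmt is 16, so the returned offset into %y is
// 16 / 8 == 2.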
3386 
3387 /// Match a pattern where a wide type scalar value is stored by several narrow
3388 /// stores. Fold it into a single store or a BSWAP and a store if the target
3389 /// supports it.
3390 ///
3391 /// Assuming little endian target:
3392 /// i8 *p = ...
3393 /// i32 val = ...
3394 /// p[0] = (val >> 0) & 0xFF;
3395 /// p[1] = (val >> 8) & 0xFF;
3396 /// p[2] = (val >> 16) & 0xFF;
3397 /// p[3] = (val >> 24) & 0xFF;
3398 /// =>
3399 /// *((i32)p) = val;
3400 ///
3401 /// i8 *p = ...
3402 /// i32 val = ...
3403 /// p[0] = (val >> 24) & 0xFF;
3404 /// p[1] = (val >> 16) & 0xFF;
3405 /// p[2] = (val >> 8) & 0xFF;
3406 /// p[3] = (val >> 0) & 0xFF;
3407 /// =>
3408 /// *((i32)p) = BSWAP(val);
3409 bool CombinerHelper::matchTruncStoreMerge(MachineInstr &MI,
3410  MergeTruncStoresInfo &MatchInfo) {
3411  auto &StoreMI = cast<GStore>(MI);
3412  LLT MemTy = StoreMI.getMMO().getMemoryType();
3413 
3414  // We only handle merging simple stores of 1-4 bytes.
3415  if (!MemTy.isScalar())
3416  return false;
3417  switch (MemTy.getSizeInBits()) {
3418  case 8:
3419  case 16:
3420  case 32:
3421  break;
3422  default:
3423  return false;
3424  }
3425  if (!StoreMI.isSimple())
3426  return false;
3427 
3428  // We do a simple search for mergeable stores prior to this one.
3429  // Any potential alias hazard along the way terminates the search.
3430  SmallVector<GStore *> FoundStores;
3431 
3432  // We're looking for:
3433  // 1) a (store(trunc(...)))
3434  // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
3435  // the partial value stored.
3436  // 3) where the offsets form either a little or big-endian sequence.
3437 
3438  auto &LastStore = StoreMI;
3439 
3440  // The single base pointer that all stores must use.
3441  Register BaseReg;
3442  int64_t LastOffset;
3443  if (!mi_match(LastStore.getPointerReg(), MRI,
3444  m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
3445  BaseReg = LastStore.getPointerReg();
3446  LastOffset = 0;
3447  }
3448 
3449  GStore *LowestIdxStore = &LastStore;
3450  int64_t LowestIdxOffset = LastOffset;
3451 
3452  Register WideSrcVal;
3453  auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, MRI);
3454  if (!LowestShiftAmt)
3455  return false; // Didn't match a trunc.
3456  assert(WideSrcVal.isValid());
3457 
3458  LLT WideStoreTy = MRI.getType(WideSrcVal);
3459  const unsigned NumStoresRequired =
3460  WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();
3461 
3462  SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
3463  OffsetMap[*LowestShiftAmt] = LastOffset;
3464  FoundStores.emplace_back(&LastStore);
3465 
3466  // Search the block up for more stores.
3467  // We use a search threshold of 10 instructions here because the combiner
3468  // works top-down within a block, and we don't want to search an unbounded
3469  // number of predecessor instructions trying to find matching stores.
3470  // If we moved this optimization into a separate pass then we could probably
3471  // use a more efficient search without having a hard-coded threshold.
3472  const int MaxInstsToCheck = 10;
3473  int NumInstsChecked = 0;
3474  for (auto II = ++LastStore.getReverseIterator();
3475  II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
3476  ++II) {
3477  NumInstsChecked++;
3478  GStore *NewStore;
3479  if ((NewStore = dyn_cast<GStore>(&*II))) {
3480  if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
3481  break;
3482  } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
3483  break;
3484  } else {
3485  continue; // This is a safe instruction we can look past.
3486  }
3487 
3488  Register NewBaseReg;
3489  int64_t MemOffset;
3490  // Check we're storing to the same base + some offset.
3491  if (!mi_match(NewStore->getPointerReg(), MRI,
3492  m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
3493  NewBaseReg = NewStore->getPointerReg();
3494  MemOffset = 0;
3495  }
3496  if (BaseReg != NewBaseReg)
3497  break;
3498 
3499  auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, MRI);
3500  if (!ShiftByteOffset)
3501  break;
3502  if (MemOffset < LowestIdxOffset) {
3503  LowestIdxOffset = MemOffset;
3504  LowestIdxStore = NewStore;
3505  }
3506 
3507  // Map the offset in the store and the offset in the combined value, and
3508  // early return if it has been set before.
3509  if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
3510  OffsetMap[*ShiftByteOffset] != INT64_MAX)
3511  break;
3512  OffsetMap[*ShiftByteOffset] = MemOffset;
3513 
3514  FoundStores.emplace_back(NewStore);
3515  // Reset counter since we've found a matching inst.
3516  NumInstsChecked = 0;
3517  if (FoundStores.size() == NumStoresRequired)
3518  break;
3519  }
3520 
3521  if (FoundStores.size() != NumStoresRequired) {
3522  return false;
3523  }
3524 
3525  const auto &DL = LastStore.getMF()->getDataLayout();
3526  auto &C = LastStore.getMF()->getFunction().getContext();
3527  // Check that a store of the wide type is both allowed and fast on the target
3528  bool Fast = false;
3529  bool Allowed = getTargetLowering().allowsMemoryAccess(
3530  C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
3531  if (!Allowed || !Fast)
3532  return false;
3533 
3534  // Check if the pieces of the value are going to the expected places in memory
3535  // to merge the stores.
3536  unsigned NarrowBits = MemTy.getScalarSizeInBits();
3537  auto checkOffsets = [&](bool MatchLittleEndian) {
3538  if (MatchLittleEndian) {
3539  for (unsigned i = 0; i != NumStoresRequired; ++i)
3540  if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
3541  return false;
3542  } else { // MatchBigEndian by reversing loop counter.
3543  for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired;
3544  ++i, --j)
3545  if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
3546  return false;
3547  }
3548  return true;
3549  };
3550 
3551  // Check if the offsets line up for the native data layout of this target.
3552  bool NeedBswap = false;
3553  bool NeedRotate = false;
3554  if (!checkOffsets(DL.isLittleEndian())) {
3555  // Special-case: check if byte offsets line up for the opposite endian.
3556  if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
3557  NeedBswap = true;
3558  else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian()))
3559  NeedRotate = true;
3560  else
3561  return false;
3562  }
3563 
3564  if (NeedBswap &&
3565  !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}))
3566  return false;
3567  if (NeedRotate &&
3568  !isLegalOrBeforeLegalizer({TargetOpcode::G_ROTR, {WideStoreTy}}))
3569  return false;
3570 
3571  MatchInfo.NeedBSwap = NeedBswap;
3572  MatchInfo.NeedRotate = NeedRotate;
3573  MatchInfo.LowestIdxStore = LowestIdxStore;
3574  MatchInfo.WideSrcVal = WideSrcVal;
3575  MatchInfo.FoundStores = std::move(FoundStores);
3576  return true;
3577 }
3578 
3579 void CombinerHelper::applyTruncStoreMerge(MachineInstr &MI,
3580  MergeTruncStoresInfo &MatchInfo) {
3581 
3582  Builder.setInstrAndDebugLoc(MI);
3583  Register WideSrcVal = MatchInfo.WideSrcVal;
3584  LLT WideStoreTy = MRI.getType(WideSrcVal);
3585 
3586  if (MatchInfo.NeedBSwap) {
3587  WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
3588  } else if (MatchInfo.NeedRotate) {
3589  assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
3590  "Unexpected type for rotate");
3591  auto RotAmt =
3592  Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
3593  WideSrcVal =
3594  Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
3595  }
3596 
3597  Builder.buildStore(WideSrcVal, MatchInfo.LowestIdxStore->getPointerReg(),
3598  MatchInfo.LowestIdxStore->getMMO().getPointerInfo(),
3599  MatchInfo.LowestIdxStore->getMMO().getAlign());
3600 
3601  // Erase the old stores.
3602  for (auto *ST : MatchInfo.FoundStores)
3603  ST->eraseFromParent();
3604 }
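// Illustrative example of the rotate path above: two s16 stores that place
// the halves of an s32 value in the opposite order to the target's
// endianness can be merged as
//   %r:_(s32) = G_ROTR %val, 16
//   G_STORE %r, %lowest_ptr :: (store (s32))
// since rotating by half the width swaps the two halves.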
3605 
3606 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3607  MachineInstr *&ExtMI) {
3608  assert(MI.getOpcode() == TargetOpcode::G_PHI);
3609 
3610  Register DstReg = MI.getOperand(0).getReg();
3611 
3612  // TODO: Extending a vector may be expensive, don't do this until heuristics
3613  // are better.
3614  if (MRI.getType(DstReg).isVector())
3615  return false;
3616 
3617  // Try to match a phi, whose only use is an extend.
3618  if (!MRI.hasOneNonDBGUse(DstReg))
3619  return false;
3620  ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3621  switch (ExtMI->getOpcode()) {
3622  case TargetOpcode::G_ANYEXT:
3623  return true; // G_ANYEXT is usually free.
3624  case TargetOpcode::G_ZEXT:
3625  case TargetOpcode::G_SEXT:
3626  break;
3627  default:
3628  return false;
3629  }
3630 
3631  // If the target is likely to fold this extend away, don't propagate.
3632  if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3633  return false;
3634 
3635  // We don't want to propagate the extends unless there's a good chance that
3636  // they'll be optimized in some way.
3637  // Collect the unique incoming values.
3638  SmallPtrSet<MachineInstr *, 4> InSrcs;
3639  for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3640  auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3641  switch (DefMI->getOpcode()) {
3642  case TargetOpcode::G_LOAD:
3643  case TargetOpcode::G_TRUNC:
3644  case TargetOpcode::G_SEXT:
3645  case TargetOpcode::G_ZEXT:
3646  case TargetOpcode::G_ANYEXT:
3647  case TargetOpcode::G_CONSTANT:
3648  InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3649  // Don't try to propagate if there are too many places to create new
3650  // extends; chances are it'll increase code size.
3651  if (InSrcs.size() > 2)
3652  return false;
3653  break;
3654  default:
3655  return false;
3656  }
3657  }
3658  return true;
3659 }
3660 
3661 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3662  MachineInstr *&ExtMI) {
3663  assert(MI.getOpcode() == TargetOpcode::G_PHI);
3664  Register DstReg = ExtMI->getOperand(0).getReg();
3665  LLT ExtTy = MRI.getType(DstReg);
3666 
3667  // Propagate the extension into each incoming register's defining block.
3668  // Use a SetVector here because PHIs can have duplicate edges, and we want
3669  // deterministic iteration order.
3670  SmallSetVector<MachineInstr *, 8> SrcMIs;
3671  SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3672  for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3673  auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3674  if (!SrcMIs.insert(SrcMI))
3675  continue;
3676 
3677  // Build an extend after each src inst.
3678  auto *MBB = SrcMI->getParent();
3679  MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3680  if (InsertPt != MBB->end() && InsertPt->isPHI())
3681  InsertPt = MBB->getFirstNonPHI();
3682 
3683  Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3684  Builder.setDebugLoc(MI.getDebugLoc());
3685  auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3686  SrcMI->getOperand(0).getReg());
3687  OldToNewSrcMap[SrcMI] = NewExt;
3688  }
3689 
3690  // Create a new phi with the extended inputs.
3691  Builder.setInstrAndDebugLoc(MI);
3692  auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3693  NewPhi.addDef(DstReg);
3694  for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); ++SrcIdx) {
3695  auto &MO = MI.getOperand(SrcIdx);
3696  if (!MO.isReg()) {
3697  NewPhi.addMBB(MO.getMBB());
3698  continue;
3699  }
3700  auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3701  NewPhi.addUse(NewSrc->getOperand(0).getReg());
3702  }
3703  Builder.insertInstr(NewPhi);
3704  ExtMI->eraseFromParent();
3705 }
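// Illustrative example (hypothetical blocks and registers): a G_PHI whose
// only use is an extend
//   bb2: %phi:_(s8)  = G_PHI %a(s8), %bb0, %b(s8), %bb1
//        %ext:_(s32) = G_SEXT %phi
// is rewritten so the extends are emitted next to the incoming defs:
//   bb0: %a_ext:_(s32) = G_SEXT %a
//   bb1: %b_ext:_(s32) = G_SEXT %b
//   bb2: %ext:_(s32)   = G_PHI %a_ext, %bb0, %b_ext, %bb1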
3706 
3707 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3708  Register &Reg) {
3709  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3710  // If we have a constant index, look for a G_BUILD_VECTOR source
3711  // and find the source register that the index maps to.
3712  Register SrcVec = MI.getOperand(1).getReg();
3713  LLT SrcTy = MRI.getType(SrcVec);
3714  if (!isLegalOrBeforeLegalizer(
3715  {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}}))
3716  return false;
3717 
3718  auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3719  if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3720  return false;
3721 
3722  unsigned VecIdx = Cst->Value.getZExtValue();
3723  MachineInstr *BuildVecMI =
3724  getOpcodeDef(TargetOpcode::G_BUILD_VECTOR, SrcVec, MRI);
3725  if (!BuildVecMI) {
3726  BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR_TRUNC, SrcVec, MRI);
3727  if (!BuildVecMI)
3728  return false;
3729  LLT ScalarTy = MRI.getType(BuildVecMI->getOperand(1).getReg());
3730  if (!isLegalOrBeforeLegalizer(
3731  {TargetOpcode::G_BUILD_VECTOR_TRUNC, {SrcTy, ScalarTy}}))
3732  return false;
3733  }
3734 
3735  EVT Ty(getMVTForLLT(SrcTy));
3736  if (!MRI.hasOneNonDBGUse(SrcVec) &&
3737  !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
3738  return false;
3739 
3740  Reg = BuildVecMI->getOperand(VecIdx + 1).getReg();
3741  return true;
3742 }
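// For illustration (hypothetical registers): with
//   %vec:_(<4 x s32>) = G_BUILD_VECTOR %s0, %s1, %s2, %s3
//   %elt:_(s32)       = G_EXTRACT_VECTOR_ELT %vec, 2
// the constant index 2 selects build_vector operand 2 + 1, so Reg is set to
// %s2 and the extract can later be replaced by it.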
3743 
3744 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
3745  Register &Reg) {
3746  // Check the type of the register, since it may have come from a
3747  // G_BUILD_VECTOR_TRUNC.
3748  LLT ScalarTy = MRI.getType(Reg);
3749  Register DstReg = MI.getOperand(0).getReg();
3750  LLT DstTy = MRI.getType(DstReg);
3751 
3752  Builder.setInstrAndDebugLoc(MI);
3753  if (ScalarTy != DstTy) {
3754  assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
3755  Builder.buildTrunc(DstReg, Reg);
3756  MI.eraseFromParent();
3757  return;
3758  }
3759  replaceSingleDefInstWithReg(MI, Reg);
3760 }
3761 
3762 bool CombinerHelper::matchExtractAllEltsFromBuildVector(
3763  MachineInstr &MI,
3764  SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3765  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3766  // This combine tries to find build_vector's which have every source element
3767  // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
3768  // masked load scalarization are run late in the pipeline. There's already
3769  // a combine for a similar pattern starting from the extract, but that
3770  // doesn't attempt to do it if there are multiple uses of the build_vector,
3771  // which in this case is true. Starting the combine from the build_vector
3772  // feels more natural than trying to find sibling nodes of extracts.
3773  // E.g.
3774  // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
3775  // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
3776  // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
3777  // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
3778  // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
3779  // ==>
3780  // replace ext{1,2,3,4} with %s{1,2,3,4}
3781 
3782  Register DstReg = MI.getOperand(0).getReg();
3783  LLT DstTy = MRI.getType(DstReg);
3784  unsigned NumElts = DstTy.getNumElements();
3785 
3786  SmallBitVector ExtractedElts(NumElts);
3787  for (auto &II : make_range(MRI.use_instr_nodbg_begin(DstReg),
3788  MRI.use_instr_nodbg_end())) {
3789  if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
3790  return false;
3791  auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
3792  if (!Cst)
3793  return false;
3794  unsigned Idx = Cst.getValue().getZExtValue();
3795  if (Idx >= NumElts)
3796  return false; // Out of range.
3797  ExtractedElts.set(Idx);
3798  SrcDstPairs.emplace_back(
3799  std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
3800  }
3801  // Match if every element was extracted.
3802  return ExtractedElts.all();
3803 }
3804 
3805 void CombinerHelper::applyExtractAllEltsFromBuildVector(
3806  MachineInstr &MI,
3807  SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
3808  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
3809  for (auto &Pair : SrcDstPairs) {
3810  auto *ExtMI = Pair.second;
3811  replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
3812  ExtMI->eraseFromParent();
3813  }
3814  MI.eraseFromParent();
3815 }
3816 
3817 void CombinerHelper::applyBuildFn(
3818  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3819  Builder.setInstrAndDebugLoc(MI);
3820  MatchInfo(Builder);
3821  MI.eraseFromParent();
3822 }
3823 
3824 void CombinerHelper::applyBuildFnNoErase(
3825  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3826  Builder.setInstrAndDebugLoc(MI);
3827  MatchInfo(Builder);
3828 }
3829 
3830 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
3831 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
3832  unsigned Opc = MI.getOpcode();
3833  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3834  Register X = MI.getOperand(1).getReg();
3835  Register Y = MI.getOperand(2).getReg();
3836  if (X != Y)
3837  return false;
3838  unsigned RotateOpc =
3839  Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
3840  return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
3841 }
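// For illustration: a funnel shift whose two value operands are the same
// register, e.g. %r = G_FSHL %x, %x, %amt, is just a rotate left,
// %r = G_ROTL %x, %amt (G_FSHR likewise maps to G_ROTR), which is what the
// apply function below rewrites it to.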
3842 
3843 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
3844  unsigned Opc = MI.getOpcode();
3845  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
3846  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
3847  Observer.changingInstr(MI);
3848  MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
3849  : TargetOpcode::G_ROTR));
3850  MI.RemoveOperand(2);
3851  Observer.changedInstr(MI);
3852 }
3853 
3854 // Fold (rot x, c) -> (rot x, c % BitSize)
3855 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
3856  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
3857  MI.getOpcode() == TargetOpcode::G_ROTR);
3858  unsigned Bitsize =
3859  MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
3860  Register AmtReg = MI.getOperand(2).getReg();
3861  bool OutOfRange = false;
3862  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
3863  if (auto *CI = dyn_cast<ConstantInt>(C))
3864  OutOfRange |= CI->getValue().uge(Bitsize);
3865  return true;
3866  };
3867  return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
3868 }
3869 
3870 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
3871  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
3872  MI.getOpcode() == TargetOpcode::G_ROTR);
3873  unsigned Bitsize =
3874  MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
3875  Builder.setInstrAndDebugLoc(MI);
3876  Register Amt = MI.getOperand(2).getReg();
3877  LLT AmtTy = MRI.getType(Amt);
3878  auto Bits = Builder.buildConstant(AmtTy, Bitsize);
3879  Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
3880  Observer.changingInstr(MI);
3881  MI.getOperand(2).setReg(Amt);
3882  Observer.changedInstr(MI);
3883 }
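// Worked example (illustrative): for an s32 rotate, G_ROTL %x, 35 has an
// out-of-range amount; the G_UREM built above reduces it to 35 urem 32, so
// the rotate becomes G_ROTL %x, 3.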
3884 
3885 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
3886  int64_t &MatchInfo) {
3887  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
3888  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
3889  auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
3890  auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
3891  Optional<bool> KnownVal;
3892  switch (Pred) {
3893  default:
3894  llvm_unreachable("Unexpected G_ICMP predicate?");
3895  case CmpInst::ICMP_EQ:
3896  KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
3897  break;
3898  case CmpInst::ICMP_NE:
3899  KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
3900  break;
3901  case CmpInst::ICMP_SGE:
3902  KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
3903  break;
3904  case CmpInst::ICMP_SGT:
3905  KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
3906  break;
3907  case CmpInst::ICMP_SLE:
3908  KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
3909  break;
3910  case CmpInst::ICMP_SLT:
3911  KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
3912  break;
3913  case CmpInst::ICMP_UGE:
3914  KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
3915  break;
3916  case CmpInst::ICMP_UGT:
3917  KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
3918  break;
3919  case CmpInst::ICMP_ULE:
3920  KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
3921  break;
3922  case CmpInst::ICMP_ULT:
3923  KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
3924  break;
3925  }
3926  if (!KnownVal)
3927  return false;
3928  MatchInfo =
3929  *KnownVal
3930  ? getICmpTrueVal(getTargetLowering(),
3931  /*IsVector = */
3932  MRI.getType(MI.getOperand(0).getReg()).isVector(),
3933  /* IsFP = */ false)
3934  : 0;
3935  return true;
3936 }
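// Worked example (illustrative): if known bits prove the LHS is at most 3
// (say it was masked by G_AND with 3) and the RHS is the constant 8, then an
// unsigned-less-than compare is known true and folds to the target's "true"
// value; when neither outcome can be proven, KnownVal stays None and the
// combine bails out.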
3937 
3938 bool CombinerHelper::matchICmpToLHSKnownBits(
3939  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3940  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
3941  // Given:
3942  //
3943  // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
3944  // %cmp = G_ICMP ne %x, 0
3945  //
3946  // Or:
3947  //
3948  // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
3949  // %cmp = G_ICMP eq %x, 1
3950  //
3951  // We can replace %cmp with %x assuming true is 1 on the target.
3952  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
3953  if (!CmpInst::isEquality(Pred))
3954  return false;
3955  Register Dst = MI.getOperand(0).getReg();
3956  LLT DstTy = MRI.getType(Dst);
3957  if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
3958  /* IsFP = */ false) != 1)
3959  return false;
3960  int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
3961  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
3962  return false;
3963  Register LHS = MI.getOperand(2).getReg();
3964  auto KnownLHS = KB->getKnownBits(LHS);
3965  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
3966  return false;
3967  // Make sure replacing Dst with the LHS is a legal operation.
3968  LLT LHSTy = MRI.getType(LHS);
3969  unsigned LHSSize = LHSTy.getSizeInBits();
3970  unsigned DstSize = DstTy.getSizeInBits();
3971  unsigned Op = TargetOpcode::COPY;
3972  if (DstSize != LHSSize)
3973  Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
3974  if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
3975  return false;
3976  MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
3977  return true;
3978 }
3979 
3980 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
3981 bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
3982  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3983  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
3984  Register Dst = MI.getOperand(0).getReg();
3985  Register Src = MI.getOperand(1).getReg();
3986  LLT Ty = MRI.getType(Src);
3987  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
3988  if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
3989  return false;
3990  int64_t Width = MI.getOperand(2).getImm();
3991  Register ShiftSrc;
3992  int64_t ShiftImm;
3993  if (!mi_match(
3994  Src, MRI,
3995  m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
3996  m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
3997  return false;
3998  if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
3999  return false;
4000 
4001  MatchInfo = [=](MachineIRBuilder &B) {
4002  auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4003  auto Cst2 = B.buildConstant(ExtractTy, Width);
4004  B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4005  };
4006  return true;
4007 }
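// Worked example (illustrative), s32 operands:
//   %sh = G_LSHR %x, 4
//   %r  = G_SEXT_INREG %sh, 8
// sign-extends the 8 bits of %x starting at bit 4, so it becomes
//   %r = G_SBFX %x, 4, 8
// provided 4 + 8 does not exceed the 32-bit width.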
4008 
4009 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
4010 bool CombinerHelper::matchBitfieldExtractFromAnd(
4011  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4012  assert(MI.getOpcode() == TargetOpcode::G_AND);
4013  Register Dst = MI.getOperand(0).getReg();
4014  LLT Ty = MRI.getType(Dst);
4015  if (!getTargetLowering().isConstantUnsignedBitfieldExtactLegal(
4016  TargetOpcode::G_UBFX, Ty, Ty))
4017  return false;
4018 
4019  int64_t AndImm, LSBImm;
4020  Register ShiftSrc;
4021  const unsigned Size = Ty.getScalarSizeInBits();
4022  if (!mi_match(MI.getOperand(0).getReg(), MRI,
4023  m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
4024  m_ICst(AndImm))))
4025  return false;
4026 
4027  // The mask is a mask of the low bits iff imm & (imm+1) == 0.
4028  auto MaybeMask = static_cast<uint64_t>(AndImm);
4029  if (MaybeMask & (MaybeMask + 1))
4030  return false;
4031 
4032  // LSB must fit within the register.
4033  if (static_cast<uint64_t>(LSBImm) >= Size)
4034  return false;
4035 
4036  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4037  uint64_t Width = APInt(Size, AndImm).countTrailingOnes();
4038  MatchInfo = [=](MachineIRBuilder &B) {
4039  auto WidthCst = B.buildConstant(ExtractTy, Width);
4040  auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
4041  B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
4042  };
4043  return true;
4044 }
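// Worked example (illustrative), s32 operands:
//   %r = G_AND (G_LSHR %x, 8), 0xFF
// matches with LSBImm == 8 and a low-bit mask of width
// countTrailingOnes(0xFF) == 8, giving %r = G_UBFX %x, 8, 8.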
4045 
4046 bool CombinerHelper::matchBitfieldExtractFromShr(
4047  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4048  const unsigned Opcode = MI.getOpcode();
4049  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
4050 
4051  const Register Dst = MI.getOperand(0).getReg();
4052 
4053  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
4054  ? TargetOpcode::G_SBFX
4055  : TargetOpcode::G_UBFX;
4056 
4057  // Check if the type we would use for the extract is legal
4058  LLT Ty = MRI.getType(Dst);
4059  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4060  if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
4061  return false;
4062 
4063  Register ShlSrc;
4064  int64_t ShrAmt;
4065  int64_t ShlAmt;
4066  const unsigned Size = Ty.getScalarSizeInBits();
4067 
4068  // Try to match shr (shl x, c1), c2
4069  if (!mi_match(Dst, MRI,
4070  m_BinOp(Opcode,
4071  m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
4072  m_ICst(ShrAmt))))
4073  return false;
4074 
4075  // Make sure that the shift sizes can fit a bitfield extract
4076  if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
4077  return false;
4078 
4079  // Skip this combine if the G_SEXT_INREG combine could handle it
4080  if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
4081  return false;
4082 
4083  // Calculate start position and width of the extract
4084  const int64_t Pos = ShrAmt - ShlAmt;
4085  const int64_t Width = Size - ShrAmt;
4086 
4087  MatchInfo = [=](MachineIRBuilder &B) {
4088  auto WidthCst = B.buildConstant(ExtractTy, Width);
4089  auto PosCst = B.buildConstant(ExtractTy, Pos);
4090  B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
4091  };
4092  return true;
4093 }
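// Worked example (illustrative), s32 operands:
//   %r = G_LSHR (G_SHL %x, 4), 8
// keeps bits [4, 27] of %x, so Pos = 8 - 4 = 4 and Width = 32 - 8 = 24,
// giving %r = G_UBFX %x, 4, 24.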
4094 
4095 bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4096  MachineInstr &PtrAdd) {
4097  assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD);
4098 
4099  Register Src1Reg = PtrAdd.getOperand(1).getReg();
4100  MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI);
4101  if (!Src1Def)
4102  return false;
4103 
4104  Register Src2Reg = PtrAdd.getOperand(2).getReg();
4105 
4106  if (MRI.hasOneNonDBGUse(Src1Reg))
4107  return false;
4108 
4109  auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI);
4110  if (!C1)
4111  return false;
4112  auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4113  if (!C2)
4114  return false;
4115 
4116  const APInt &C1APIntVal = *C1;
4117  const APInt &C2APIntVal = *C2;
4118  const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();
4119 
4120  for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) {
4121  // This combine may end up running before ptrtoint/inttoptr combines
4122  // manage to eliminate redundant conversions, so try to look through them.
4123  MachineInstr *ConvUseMI = &UseMI;
4124  unsigned ConvUseOpc = ConvUseMI->getOpcode();
4125  while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
4126  ConvUseOpc == TargetOpcode::G_PTRTOINT) {
4127  Register DefReg = ConvUseMI->getOperand(0).getReg();
4128  if (!MRI.hasOneNonDBGUse(DefReg))
4129  break;
4130  ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg);
4131  ConvUseOpc = ConvUseMI->getOpcode();
4132  }
4133  auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
4134  ConvUseOpc == TargetOpcode::G_STORE;
4135  if (!LoadStore)
4136  continue;
4137  // Is x[offset2] already not a legal addressing mode? If so then
4138  // reassociating the constants breaks nothing (we test offset2 because
4139  // that's the one we hope to fold into the load or store).
4140  TargetLoweringBase::AddrMode AM;
4141  AM.HasBaseReg = true;
4142  AM.BaseOffs = C2APIntVal.getSExtValue();
4143  unsigned AS =
4144  MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace();
4145  Type *AccessTy =
4146  getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()),
4147  PtrAdd.getMF()->getFunction().getContext());
4148  const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering();
4149  if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4150  AccessTy, AS))
4151  continue;
4152 
4153  // Would x[offset1+offset2] still be a legal addressing mode?
4154  AM.BaseOffs = CombinedValue;
4155  if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4156  AccessTy, AS))
4157  return true;
4158  }
4159 
4160  return false;
4161 }
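// For illustration: on a target whose load/store immediate offsets must fit
// a limited range, x[offset2] may be foldable into the access while
// x[offset1 + offset2] is not; in that case the function above returns true
// and the reassociation combines below are skipped.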
4162 
4163 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
4164  MachineInstr *RHS,
4165  BuildFnTy &MatchInfo) {
4166  // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4167  Register Src1Reg = MI.getOperand(1).getReg();
4168  if (RHS->getOpcode() != TargetOpcode::G_ADD)
4169  return false;
4170  auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
4171  if (!C2)
4172  return false;
4173 
4174  MatchInfo = [=, &MI](MachineIRBuilder &B) {
4175  LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
4176 
4177  auto NewBase =
4178  Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
4179  Observer.changingInstr(MI);
4180  MI.getOperand(1).setReg(NewBase.getReg(0));
4181  MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
4182  Observer.changedInstr(MI);
4183  };
4184  return !reassociationCanBreakAddressingModePattern(MI);
4185 }
4186 
4187 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
4188  MachineInstr *LHS,
4189  MachineInstr *RHS,
4190  BuildFnTy &MatchInfo) {
4191  // G_PTR_ADD(G_PTR_ADD(X, C), Y) -> G_PTR_ADD(G_PTR_ADD(X, Y), C)
4192  // if and only if (G_PTR_ADD X, C) has one use.
4193  Register LHSBase;
4194  Optional<ValueAndVReg> LHSCstOff;
4195  if (!mi_match(MI.getBaseReg(), MRI,
4196  m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
4197  return false;
4198 
4199  auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
4200  MatchInfo = [=, &MI](MachineIRBuilder &B) {
4201  // When we change LHSPtrAdd's offset register we might cause it to use a reg
4202  // before its def. Sink the instruction so the outer PTR_ADD to ensure this
4203  // doesn't happen.
4204  LHSPtrAdd->moveBefore(&MI);
4205  Register RHSReg = MI.getOffsetReg();
4206  Observer.changingInstr(MI);
4207  MI.getOperand(2).setReg(LHSCstOff->VReg);
4208  Observer.changedInstr(MI);
4209  Observer.changingInstr(*LHSPtrAdd);
4210  LHSPtrAdd->getOperand(2).setReg(RHSReg);
4211  Observer.changedInstr(*LHSPtrAdd);
4212  };
4213  return !reassociationCanBreakAddressingModePattern(MI);
4214 }
4215 
4216 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
4217  MachineInstr *LHS,
4218  MachineInstr *RHS,
4219  BuildFnTy &MatchInfo) {
4220  // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4221  auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
4222  if (!LHSPtrAdd)
4223  return false;
4224 
4225  Register Src2Reg = MI.getOperand(2).getReg();
4226  Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4227  Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4228  auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
4229  if (!C1)
4230  return false;
4231  auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4232  if (!C2)
4233  return false;
4234 
4235  MatchInfo = [=, &MI](MachineIRBuilder &B) {
4236  auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
4237  Observer.changingInstr(MI);
4238  MI.getOperand(1).setReg(LHSSrc1);
4239  MI.getOperand(2).setReg(NewCst.getReg(0));
4240  Observer.changedInstr(MI);
4241  };
4242  return !reassociationCanBreakAddressingModePattern(MI);
4243 }
4244 
4245 bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
4246  BuildFnTy &MatchInfo) {
4247  auto &PtrAdd = cast<GPtrAdd>(MI);
4248  // We're trying to match a few pointer computation patterns here for
4249  // re-association opportunities.
4250  // 1) Isolating a constant operand to be on the RHS, e.g.:
4251  // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4252  //
4253  // 2) Folding two constants in each sub-tree as long as such folding
4254  // doesn't break a legal addressing mode.
4255  // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4256  //
4257  // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
4258  // G_PTR_ADD(G_PTR_ADD(X, C), Y) -> G_PTR_ADD(G_PTR_ADD(X, Y), C)
4259  // if and only if (G_PTR_ADD X, C) has one use.
4260  MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
4261  MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());
4262 
4263  // Try to match example 2.
4264  if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
4265  return true;
4266 
4267  // Try to match example 3.
4268  if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
4269  return true;
4270 
4271  // Try to match example 1.
4272  if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
4273  return true;
4274 
4275  return false;
4276 }
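// Worked example for pattern 2 (illustrative):
//   %p1:_(p0) = G_PTR_ADD %base, 4
//   %p2:_(p0) = G_PTR_ADD %p1, 8
// folds to %p2 = G_PTR_ADD %base, 12, unless doing so would break a legal
// addressing mode as checked above.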
4277 
4278 bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
4279  Register Op1 = MI.getOperand(1).getReg();
4280  Register Op2 = MI.getOperand(2).getReg();
4281  auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
4282  if (!MaybeCst)
4283  return false;
4284  MatchInfo = *MaybeCst;
4285  return true;
4286 }
4287 
4288 bool CombinerHelper::matchNarrowBinopFeedingAnd(
4289  MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4290  // Look for a binop feeding into an AND with a mask:
4291  //
4292  // %add = G_ADD %lhs, %rhs
4293  // %and = G_AND %add, 000...11111111
4294  //
4295  // Check if it's possible to perform the binop at a narrower width and zext
4296  // back to the original width like so:
4297  //
4298  // %narrow_lhs = G_TRUNC %lhs
4299  // %narrow_rhs = G_TRUNC %rhs
4300  // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
4301  // %new_add = G_ZEXT %narrow_add
4302  // %and = G_AND %new_add, 000...11111111
4303  //
4304  // This can allow later combines to eliminate the G_AND if it turns out
4305  // that the mask is irrelevant.
4306  assert(MI.getOpcode() == TargetOpcode::G_AND);
4307  Register Dst = MI.getOperand(0).getReg();
4308  Register AndLHS = MI.getOperand(1).getReg();
4309  Register AndRHS = MI.getOperand(2).getReg();
4310  LLT WideTy = MRI.getType(Dst);
4311 
4312  // If the potential binop has more than one use, then it's possible that one
4313  // of those uses will need its full width.
4314  if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS))
4315  return false;
4316 
4317  // Check if the LHS feeding the AND is impacted by the high bits that we're
4318  // masking out.
4319  //
4320  // e.g. for 64-bit x, y:
4321  //
4322  // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
4323  MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI);
4324  if (!LHSInst)
4325  return false;
4326  unsigned LHSOpc = LHSInst->getOpcode();
4327  switch (LHSOpc) {
4328  default:
4329  return false;
4330  case TargetOpcode::G_ADD:
4331  case TargetOpcode::G_SUB:
4332  case TargetOpcode::G_MUL:
4333  case TargetOpcode::G_AND:
4334  case TargetOpcode::G_OR:
4335  case TargetOpcode::G_XOR:
4336  break;
4337  }
4338 
4339  // Find the mask on the RHS.
4340  auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI);
4341  if (!Cst)
4342  return false;
4343  auto Mask = Cst->Value;
4344  if (!Mask.isMask())
4345  return false;
4346 
4347  // No point in combining if there's nothing to truncate.
4348  unsigned NarrowWidth = Mask.countTrailingOnes();
4349  if (NarrowWidth == WideTy.getSizeInBits())
4350  return false;
4351  LLT NarrowTy = LLT::scalar(NarrowWidth);
4352 
4353  // Check if adding the zext + truncates could be harmful.
4354  auto &MF = *MI.getMF();
4355  const auto &TLI = getTargetLowering();
4356  LLVMContext &Ctx = MF.getFunction().getContext();
4357  auto &DL = MF.getDataLayout();
4358  if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
4359  !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))
4360  return false;
4361  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) ||
4362  !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}}))
4363  return false;
4364  Register BinOpLHS = LHSInst->getOperand(1).getReg();
4365  Register BinOpRHS = LHSInst->getOperand(2).getReg();
4366  MatchInfo = [=, &MI](MachineIRBuilder &B) {
4367  auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS);
4368  auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS);
4369  auto NarrowBinOp =
4370  Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS});
4371  auto Ext = Builder.buildZExt(WideTy, NarrowBinOp);
4372  Observer.changingInstr(MI);
4373  MI.getOperand(1).setReg(Ext.getReg(0));
4374  Observer.changedInstr(MI);
4375  };
4376  return true;
4377 }
4378 
4379 bool CombinerHelper::tryCombine(MachineInstr &MI) {
4380  if (tryCombineCopy(MI))
4381  return true;
4382  if (tryCombineExtendingLoads(MI))
4383  return true;
4384  if (tryCombineIndexedLoadStore(MI))
4385  return true;
4386  return false;
4387 }