CombinerHelper.cpp
1//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
9#include "llvm/ADT/APFloat.h"
10#include "llvm/ADT/STLExtras.h"
11#include "llvm/ADT/SetVector.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/InstrTypes.h"
37#include <cmath>
38#include <optional>
39#include <tuple>
40
41#define DEBUG_TYPE "gi-combiner"
42
43using namespace llvm;
44using namespace MIPatternMatch;
45
46// Option to allow testing of the combiner while no targets know about indexed
47// addressing.
48static cl::opt<bool>
49 ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
50 cl::desc("Force all indexed operations to be "
51 "legal for the GlobalISel combiner"));
52
53CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
54 MachineIRBuilder &B, bool IsPreLegalize,
55 GISelKnownBits *KB, MachineDominatorTree *MDT,
56 const LegalizerInfo *LI)
57 : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
58 MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
59 RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
60 TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
61 (void)this->KB;
62}
63
64const TargetLowering &CombinerHelper::getTargetLowering() const {
65 return *Builder.getMF().getSubtarget().getTargetLowering();
66}
67
68/// \returns The little endian in-memory byte position of byte \p I in a
69/// \p ByteWidth bytes wide type.
70///
71/// E.g. Given a 4-byte type x, x[0] -> byte 0
72static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
73 assert(I < ByteWidth && "I must be in [0, ByteWidth)");
74 return I;
75}
76
77/// Determines the LogBase2 value for a non-null input value using the
78/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
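/// For example (illustrative): for a 32-bit value V = 8, ctlz(8) == 28, so
/// LogBase2(V) == (32 - 1) - 28 == 3.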
79static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
80 auto &MRI = *MIB.getMRI();
81 LLT Ty = MRI.getType(V);
82 auto Ctlz = MIB.buildCTLZ(Ty, V);
83 auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
84 return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
85}
86
87/// \returns The big endian in-memory byte position of byte \p I in a
88/// \p ByteWidth bytes wide type.
89///
90/// E.g. Given a 4-byte type x, x[0] -> byte 3
91static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
92 assert(I < ByteWidth && "I must be in [0, ByteWidth)");
93 return ByteWidth - I - 1;
94}
95
96/// Given a map from byte offsets in memory to indices in a load/store,
97/// determine if that map corresponds to a little or big endian byte pattern.
98///
99/// \param MemOffset2Idx maps memory offsets to address offsets.
100/// \param LowestIdx is the lowest index in \p MemOffset2Idx.
101///
102/// \returns true if the map corresponds to a big endian byte pattern, false if
103/// it corresponds to a little endian byte pattern, and std::nullopt otherwise.
104///
105/// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
106/// are as follows:
107///
108/// AddrOffset Little endian Big endian
109/// 0 0 3
110/// 1 1 2
111/// 2 2 1
112/// 3 3 0
113static std::optional<bool>
114isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
115 int64_t LowestIdx) {
116 // Need at least two byte positions to decide on endianness.
117 unsigned Width = MemOffset2Idx.size();
118 if (Width < 2)
119 return std::nullopt;
120 bool BigEndian = true, LittleEndian = true;
121 for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
122 auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
123 if (MemOffsetAndIdx == MemOffset2Idx.end())
124 return std::nullopt;
125 const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
126 assert(Idx >= 0 && "Expected non-negative byte offset?");
127 LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
128 BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
129 if (!BigEndian && !LittleEndian)
130 return std::nullopt;
131 }
132
133 assert((BigEndian != LittleEndian) &&
134 "Pattern cannot be both big and little endian!");
135 return BigEndian;
136}
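// For example (illustrative): with Width == 4 and LowestIdx == 0, the map
// {0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0} returns true (big endian), the identity
// map {0 -> 0, 1 -> 1, 2 -> 2, 3 -> 3} returns false (little endian), and
// any other or incomplete mapping returns std::nullopt.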
137
138bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; }
139
140bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
141 assert(LI && "Must have LegalizerInfo to query isLegal!");
142 return LI->getAction(Query).Action == LegalizeActions::Legal;
143}
144
145bool CombinerHelper::isLegalOrBeforeLegalizer(
146 const LegalityQuery &Query) const {
147 return isPreLegalize() || isLegal(Query);
148}
149
150bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
151 if (!Ty.isVector())
152 return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
153 // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
154 if (isPreLegalize())
155 return true;
156 LLT EltTy = Ty.getElementType();
157 return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
158 isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
159}
160
161void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
162 Register ToReg) const {
163 Observer.changingAllUsesOfReg(MRI, FromReg);
164
165 if (MRI.constrainRegAttrs(ToReg, FromReg))
166 MRI.replaceRegWith(FromReg, ToReg);
167 else
168 Builder.buildCopy(ToReg, FromReg);
169
170 Observer.finishedChangingAllUsesOfReg();
171}
172
174 MachineOperand &FromRegOp,
175 Register ToReg) const {
176 assert(FromRegOp.getParent() && "Expected an operand in an MI");
177 Observer.changingInstr(*FromRegOp.getParent());
178
179 FromRegOp.setReg(ToReg);
180
181 Observer.changedInstr(*FromRegOp.getParent());
182}
183
185 unsigned ToOpcode) const {
186 Observer.changingInstr(FromMI);
187
188 FromMI.setDesc(Builder.getTII().get(ToOpcode));
189
190 Observer.changedInstr(FromMI);
191}
192
194 return RBI->getRegBank(Reg, MRI, *TRI);
195}
196
198 if (RegBank)
199 MRI.setRegBank(Reg, *RegBank);
200}
201
203 if (matchCombineCopy(MI)) {
205 return true;
206 }
207 return false;
208}
210 if (MI.getOpcode() != TargetOpcode::COPY)
211 return false;
212 Register DstReg = MI.getOperand(0).getReg();
213 Register SrcReg = MI.getOperand(1).getReg();
214 return canReplaceReg(DstReg, SrcReg, MRI);
215}
217 Register DstReg = MI.getOperand(0).getReg();
218 Register SrcReg = MI.getOperand(1).getReg();
219 MI.eraseFromParent();
220 replaceRegWith(MRI, DstReg, SrcReg);
221}
222
224 bool IsUndef = false;
226 if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
227 applyCombineConcatVectors(MI, IsUndef, Ops);
228 return true;
229 }
230 return false;
231}
232
235 assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
236 "Invalid instruction");
237 IsUndef = true;
238 MachineInstr *Undef = nullptr;
239
240 // Walk over all the operands of concat vectors and check if they are
241 // build_vector themselves or undef.
242 // Then collect their operands in Ops.
243 for (const MachineOperand &MO : MI.uses()) {
244 Register Reg = MO.getReg();
245 MachineInstr *Def = MRI.getVRegDef(Reg);
246 assert(Def && "Operand not defined");
247 switch (Def->getOpcode()) {
248 case TargetOpcode::G_BUILD_VECTOR:
249 IsUndef = false;
250 // Remember the operands of the build_vector to fold
251 // them into the yet-to-build flattened concat vectors.
252 for (const MachineOperand &BuildVecMO : Def->uses())
253 Ops.push_back(BuildVecMO.getReg());
254 break;
255 case TargetOpcode::G_IMPLICIT_DEF: {
256 LLT OpType = MRI.getType(Reg);
257 // Keep one undef value for all the undef operands.
258 if (!Undef) {
259 Builder.setInsertPt(*MI.getParent(), MI);
260 Undef = Builder.buildUndef(OpType.getScalarType());
261 }
262 assert(MRI.getType(Undef->getOperand(0).getReg()) ==
263 OpType.getScalarType() &&
264 "All undefs should have the same type");
265 // Break the undef vector in as many scalar elements as needed
266 // for the flattening.
267 for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
268 EltIdx != EltEnd; ++EltIdx)
269 Ops.push_back(Undef->getOperand(0).getReg());
270 break;
271 }
272 default:
273 return false;
274 }
275 }
276 return true;
277}
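// For illustration, the flattening performed by the apply below (a sketch):
//   %a:_(<2 x s32>) = G_BUILD_VECTOR %x, %y
//   %b:_(<2 x s32>) = G_BUILD_VECTOR %z, %w
//   %cat:_(<4 x s32>) = G_CONCAT_VECTORS %a(<2 x s32>), %b(<2 x s32>)
// becomes:
//   %cat:_(<4 x s32>) = G_BUILD_VECTOR %x, %y, %z, %w
// and an all-G_IMPLICIT_DEF input is collapsed to a single G_IMPLICIT_DEF.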
278void CombinerHelper::applyCombineConcatVectors(
279 MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
280 // We determined that the concat_vectors can be flattened.
281 // Generate the flattened build_vector.
282 Register DstReg = MI.getOperand(0).getReg();
283 Builder.setInsertPt(*MI.getParent(), MI);
284 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
285
286 // Note: IsUndef is sort of redundant. We could have determined it by
287 // checking that all Ops are undef. Alternatively, we could have
288 // generated a build_vector of undefs and relied on another combine to
289 // clean that up. For now, given we already gather this information
290 // in tryCombineConcatVectors, just save compile time and issue the
291 // right thing.
292 if (IsUndef)
293 Builder.buildUndef(NewDstReg);
294 else
295 Builder.buildBuildVector(NewDstReg, Ops);
296 MI.eraseFromParent();
297 replaceRegWith(MRI, DstReg, NewDstReg);
298}
299
302 if (matchCombineShuffleVector(MI, Ops)) {
304 return true;
305 }
306 return false;
307}
308
311 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
312 "Invalid instruction kind");
313 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
314 Register Src1 = MI.getOperand(1).getReg();
315 LLT SrcType = MRI.getType(Src1);
316 // As bizarre as it may look, shuffle vector can actually produce
317 // scalar! This is because at the IR level a <1 x ty> shuffle
318 // vector is perfectly valid.
319 unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
320 unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
321
322 // If the resulting vector is smaller than the size of the source
323 // vectors being concatenated, we won't be able to replace the
324 // shuffle vector with a concat_vectors.
325 //
326 // Note: We may still be able to produce a concat_vectors fed by
327 // extract_vector_elt and so on. It is less clear that would
328 // be better though, so don't bother for now.
329 //
330 // If the destination is a scalar, the size of the sources doesn't
331 // matter. We will lower the shuffle to a plain copy. This will
332 // work only if the source and destination have the same size. But
333 // that's covered by the next condition.
334 //
335 // TODO: If the sizes of the source and destination don't match
336 // we could still emit an extract vector element in that case.
337 if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
338 return false;
339
340 // Check that the shuffle mask can be broken evenly between the
341 // different sources.
342 if (DstNumElts % SrcNumElts != 0)
343 return false;
344
345 // Mask length is a multiple of the source vector length.
346 // Check if the shuffle is some kind of concatenation of the input
347 // vectors.
348 unsigned NumConcat = DstNumElts / SrcNumElts;
349 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
350 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
351 for (unsigned i = 0; i != DstNumElts; ++i) {
352 int Idx = Mask[i];
353 // Undef value.
354 if (Idx < 0)
355 continue;
356 // Ensure the indices in each SrcType sized piece are sequential and that
357 // the same source is used for the whole piece.
358 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
359 (ConcatSrcs[i / SrcNumElts] >= 0 &&
360 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
361 return false;
362 // Remember which source this index came from.
363 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
364 }
365
366 // The shuffle is concatenating multiple vectors together.
367 // Collect the different operands for that.
368 Register UndefReg;
369 Register Src2 = MI.getOperand(2).getReg();
370 for (auto Src : ConcatSrcs) {
371 if (Src < 0) {
372 if (!UndefReg) {
373 Builder.setInsertPt(*MI.getParent(), MI);
374 UndefReg = Builder.buildUndef(SrcType).getReg(0);
375 }
376 Ops.push_back(UndefReg);
377 } else if (Src == 0)
378 Ops.push_back(Src1);
379 else
380 Ops.push_back(Src2);
381 }
382 return true;
383}
384
385void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
386 const ArrayRef<Register> Ops) {
387 Register DstReg = MI.getOperand(0).getReg();
388 Builder.setInsertPt(*MI.getParent(), MI);
389 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
390
391 if (Ops.size() == 1)
392 Builder.buildCopy(NewDstReg, Ops[0]);
393 else
394 Builder.buildMergeLikeInstr(NewDstReg, Ops);
395
396 MI.eraseFromParent();
397 replaceRegWith(MRI, DstReg, NewDstReg);
398}
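// For illustration, the shuffle-to-concat combine implemented above (a sketch):
//   %s:_(<4 x s32>) = G_SHUFFLE_VECTOR %a(<2 x s32>), %b(<2 x s32>),
//                                      shufflemask(0, 1, 2, 3)
// becomes a plain concatenation of the two sources:
//   %s:_(<4 x s32>) = G_CONCAT_VECTORS %a(<2 x s32>), %b(<2 x s32>)
// A source-sized piece made entirely of undef (-1) mask entries selects a
// G_IMPLICIT_DEF operand instead.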
399
401 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
402 "Invalid instruction kind");
403
404 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
405 return Mask.size() == 1;
406}
407
409 Register DstReg = MI.getOperand(0).getReg();
410 Builder.setInsertPt(*MI.getParent(), MI);
411
412 int I = MI.getOperand(3).getShuffleMask()[0];
413 Register Src1 = MI.getOperand(1).getReg();
414 LLT Src1Ty = MRI.getType(Src1);
415 int Src1NumElts = Src1Ty.isVector() ? Src1Ty.getNumElements() : 1;
416 Register SrcReg;
417 if (I >= Src1NumElts) {
418 SrcReg = MI.getOperand(2).getReg();
419 I -= Src1NumElts;
420 } else if (I >= 0)
421 SrcReg = Src1;
422
423 if (I < 0)
424 Builder.buildUndef(DstReg);
425 else if (!MRI.getType(SrcReg).isVector())
426 Builder.buildCopy(DstReg, SrcReg);
427 else
429
430 MI.eraseFromParent();
431}
432
433namespace {
434
435/// Select a preference between two uses. CurrentUse is the current preference
436/// while the *ForCandidate parameters are the attributes of the candidate
437/// under consideration.
437PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,
438 PreferredTuple &CurrentUse,
439 const LLT TyForCandidate,
440 unsigned OpcodeForCandidate,
441 MachineInstr *MIForCandidate) {
442 if (!CurrentUse.Ty.isValid()) {
443 if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
444 CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
445 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
446 return CurrentUse;
447 }
448
449 // We permit the extend to hoist through basic blocks but this is only
450 // sensible if the target has extending loads. If you end up lowering back
451 // into a load and extend during the legalizer then the end result is
452 // hoisting the extend up to the load.
453
454 // Prefer defined extensions to undefined extensions as these are more
455 // likely to reduce the number of instructions.
456 if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
457 CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
458 return CurrentUse;
459 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
460 OpcodeForCandidate != TargetOpcode::G_ANYEXT)
461 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
462
463 // Prefer sign extensions to zero extensions as sign-extensions tend to be
464 // more expensive. Don't do this if the load is already a zero-extend load
465 // though, otherwise we'll rewrite a zero-extend load into a sign-extend
466 // later.
467 if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
468 if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
469 OpcodeForCandidate == TargetOpcode::G_ZEXT)
470 return CurrentUse;
471 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
472 OpcodeForCandidate == TargetOpcode::G_SEXT)
473 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
474 }
475
476 // This is potentially target specific. We've chosen the largest type
477 // because G_TRUNC is usually free. One potential catch with this is that
478 // some targets have a reduced number of larger registers than smaller
479 // registers and this choice potentially increases the live-range for the
480 // larger value.
481 if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
482 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
483 }
484 return CurrentUse;
485}
486
487/// Find a suitable place to insert some instructions and insert them. This
488/// function accounts for special cases like inserting before a PHI node.
489/// The current strategy for inserting before PHI's is to duplicate the
490/// instructions for each predecessor. However, while that's ok for G_TRUNC
491/// on most targets since it generally requires no code, other targets/cases may
492/// want to try harder to find a dominating block.
493static void InsertInsnsWithoutSideEffectsBeforeUse(
494 MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
495 std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
496 MachineOperand &UseMO)>
497 Inserter) {
498 MachineInstr &UseMI = *UseMO.getParent();
499
500 MachineBasicBlock *InsertBB = UseMI.getParent();
501
502 // If the use is a PHI then we want the predecessor block instead.
503 if (UseMI.isPHI()) {
504 MachineOperand *PredBB = std::next(&UseMO);
505 InsertBB = PredBB->getMBB();
506 }
507
508 // If the block is the same block as the def then we want to insert just after
509 // the def instead of at the start of the block.
510 if (InsertBB == DefMI.getParent()) {
512 Inserter(InsertBB, std::next(InsertPt), UseMO);
513 return;
514 }
515
516 // Otherwise we want the start of the BB
517 Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
518}
519} // end anonymous namespace
520
522 PreferredTuple Preferred;
523 if (matchCombineExtendingLoads(MI, Preferred)) {
524 applyCombineExtendingLoads(MI, Preferred);
525 return true;
526 }
527 return false;
528}
529
530static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) {
531 unsigned CandidateLoadOpc;
532 switch (ExtOpc) {
533 case TargetOpcode::G_ANYEXT:
534 CandidateLoadOpc = TargetOpcode::G_LOAD;
535 break;
536 case TargetOpcode::G_SEXT:
537 CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
538 break;
539 case TargetOpcode::G_ZEXT:
540 CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
541 break;
542 default:
543 llvm_unreachable("Unexpected extend opc");
544 }
545 return CandidateLoadOpc;
546}
547
549 PreferredTuple &Preferred) {
550 // We match the loads and follow the uses to the extend instead of matching
551 // the extends and following the def to the load. This is because the load
552 // must remain in the same position for correctness (unless we also add code
553 // to find a safe place to sink it) whereas the extend is freely movable.
554 // It also prevents us from duplicating the load for the volatile case or just
555 // for performance.
556 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
557 if (!LoadMI)
558 return false;
559
560 Register LoadReg = LoadMI->getDstReg();
561
562 LLT LoadValueTy = MRI.getType(LoadReg);
563 if (!LoadValueTy.isScalar())
564 return false;
565
566 // Most architectures are going to legalize sub-byte (<s8) loads into at least
567 // a 1-byte load, and the MMOs can only describe memory accesses in multiples of bytes.
568 // If we try to perform extload combining on those, we can end up with
569 // %a(s8) = extload %ptr (load 1 byte from %ptr)
570 // ... which is an illegal extload instruction.
571 if (LoadValueTy.getSizeInBits() < 8)
572 return false;
573
574 // Non-power-of-2 types will very likely be legalized into multiple
575 // loads. Don't bother trying to match them into extending loads.
576 if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
577 return false;
578
579 // Find the preferred type aside from the any-extends (unless it's the only
580 // one) and non-extending ops. We'll emit an extending load to that type and
581 // emit a variant of (extend (trunc X)) for the others according to the
582 // relative type sizes. At the same time, pick an extend to use based on the
583 // extend involved in the chosen type.
584 unsigned PreferredOpcode =
585 isa<GLoad>(&MI)
586 ? TargetOpcode::G_ANYEXT
587 : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
588 Preferred = {LLT(), PreferredOpcode, nullptr};
589 for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
590 if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
591 UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
592 (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
593 const auto &MMO = LoadMI->getMMO();
594 // For atomics, only form anyextending loads.
595 if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
596 continue;
597 // Check for legality.
598 if (!isPreLegalize()) {
599 LegalityQuery::MemDesc MMDesc(MMO);
600 unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode());
601 LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
602 LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
603 if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
604 .Action != LegalizeActions::Legal)
605 continue;
606 }
607 Preferred = ChoosePreferredUse(MI, Preferred,
608 MRI.getType(UseMI.getOperand(0).getReg()),
609 UseMI.getOpcode(), &UseMI);
610 }
611 }
612
613 // There were no extends
614 if (!Preferred.MI)
615 return false;
616 // It should be impossible to choose an extend without selecting a different
617 // type since by definition the result of an extend is larger.
618 assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
619
620 LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
621 return true;
622}
623
625 PreferredTuple &Preferred) {
626 // Rewrite the load to the chosen extending load.
627 Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
628
629 // Inserter to insert a truncate back to the original type at a given point
630 // with some basic CSE to limit truncate duplication to one per BB.
632 auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
633 MachineBasicBlock::iterator InsertBefore,
634 MachineOperand &UseMO) {
635 MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
636 if (PreviouslyEmitted) {
638 UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
640 return;
641 }
642
643 Builder.setInsertPt(*InsertIntoBB, InsertBefore);
644 Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
645 MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
646 EmittedInsns[InsertIntoBB] = NewMI;
647 replaceRegOpWith(MRI, UseMO, NewDstReg);
648 };
649
651 unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode);
652 MI.setDesc(Builder.getTII().get(LoadOpc));
653
654 // Rewrite all the uses to fix up the types.
655 auto &LoadValue = MI.getOperand(0);
657 for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
658 Uses.push_back(&UseMO);
659
660 for (auto *UseMO : Uses) {
661 MachineInstr *UseMI = UseMO->getParent();
662
663 // If the extend is compatible with the preferred extend then we should fix
664 // up the type and extend so that it uses the preferred use.
665 if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
666 UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
667 Register UseDstReg = UseMI->getOperand(0).getReg();
668 MachineOperand &UseSrcMO = UseMI->getOperand(1);
669 const LLT UseDstTy = MRI.getType(UseDstReg);
670 if (UseDstReg != ChosenDstReg) {
671 if (Preferred.Ty == UseDstTy) {
672 // If the use has the same type as the preferred use, then merge
673 // the vregs and erase the extend. For example:
674 // %1:_(s8) = G_LOAD ...
675 // %2:_(s32) = G_SEXT %1(s8)
676 // %3:_(s32) = G_ANYEXT %1(s8)
677 // ... = ... %3(s32)
678 // rewrites to:
679 // %2:_(s32) = G_SEXTLOAD ...
680 // ... = ... %2(s32)
681 replaceRegWith(MRI, UseDstReg, ChosenDstReg);
683 UseMO->getParent()->eraseFromParent();
684 } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
685 // If the preferred size is smaller, then keep the extend but extend
686 // from the result of the extending load. For example:
687 // %1:_(s8) = G_LOAD ...
688 // %2:_(s32) = G_SEXT %1(s8)
689 // %3:_(s64) = G_ANYEXT %1(s8)
690 // ... = ... %3(s64)
691 /// rewrites to:
692 // %2:_(s32) = G_SEXTLOAD ...
693 // %3:_(s64) = G_ANYEXT %2:_(s32)
694 // ... = ... %3(s64)
695 replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
696 } else {
697 // If the preferred size is large, then insert a truncate. For
698 // example:
699 // %1:_(s8) = G_LOAD ...
700 // %2:_(s64) = G_SEXT %1(s8)
701 // %3:_(s32) = G_ZEXT %1(s8)
702 // ... = ... %3(s32)
703 /// rewrites to:
704 // %2:_(s64) = G_SEXTLOAD ...
705 // %4:_(s8) = G_TRUNC %2:_(s32)
706 // %3:_(s64) = G_ZEXT %2:_(s8)
707 // ... = ... %3(s64)
708 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
709 InsertTruncAt);
710 }
711 continue;
712 }
713 // The use is (one of) the uses of the preferred use we chose earlier.
714 // We're going to update the load to def this value later so just erase
715 // the old extend.
717 UseMO->getParent()->eraseFromParent();
718 continue;
719 }
720
721 // The use isn't an extend. Truncate back to the type we originally loaded.
722 // This is free on many targets.
723 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
724 }
725
726 MI.getOperand(0).setReg(ChosenDstReg);
728}
729
731 BuildFnTy &MatchInfo) {
732 assert(MI.getOpcode() == TargetOpcode::G_AND);
733
734 // If we have the following code:
735 // %mask = G_CONSTANT 255
736 // %ld = G_LOAD %ptr, (load s16)
737 // %and = G_AND %ld, %mask
738 //
739 // Try to fold it into
740 // %ld = G_ZEXTLOAD %ptr, (load s8)
741
742 Register Dst = MI.getOperand(0).getReg();
743 if (MRI.getType(Dst).isVector())
744 return false;
745
746 auto MaybeMask =
747 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
748 if (!MaybeMask)
749 return false;
750
751 APInt MaskVal = MaybeMask->Value;
752
753 if (!MaskVal.isMask())
754 return false;
755
756 Register SrcReg = MI.getOperand(1).getReg();
757 // Don't use getOpcodeDef() here since intermediate instructions may have
758 // multiple users.
759 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
760 if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
761 return false;
762
763 Register LoadReg = LoadMI->getDstReg();
764 LLT RegTy = MRI.getType(LoadReg);
765 Register PtrReg = LoadMI->getPointerReg();
766 unsigned RegSize = RegTy.getSizeInBits();
767 uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
768 unsigned MaskSizeBits = MaskVal.countr_one();
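 // E.g. (illustrative) a 0xFF mask gives MaskSizeBits == 8, allowing a
 // (load s16) feeding the G_AND to be shrunk to a zero-extending (load s8)
 // below.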
769
770 // The mask may not be larger than the in-memory type, as it might cover sign
771 // extended bits
772 if (MaskSizeBits > LoadSizeBits)
773 return false;
774
775 // If the mask covers the whole destination register, there's nothing to
776 // extend
777 if (MaskSizeBits >= RegSize)
778 return false;
779
780 // Most targets cannot deal with loads of size < 8 and need to re-legalize to
781 // at least byte loads. Avoid creating such loads here
782 if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
783 return false;
784
785 const MachineMemOperand &MMO = LoadMI->getMMO();
786 LegalityQuery::MemDesc MemDesc(MMO);
787
788 // Don't modify the memory access size if this is atomic/volatile, but we can
789 // still adjust the opcode to indicate the high bit behavior.
790 if (LoadMI->isSimple())
791 MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
792 else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
793 return false;
794
795 // TODO: Could check if it's legal with the reduced or original memory size.
796 if (!isLegalOrBeforeLegalizer(
797 {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
798 return false;
799
800 MatchInfo = [=](MachineIRBuilder &B) {
801 B.setInstrAndDebugLoc(*LoadMI);
802 auto &MF = B.getMF();
803 auto PtrInfo = MMO.getPointerInfo();
804 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
805 B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
806 LoadMI->eraseFromParent();
807 };
808 return true;
809}
810
812 const MachineInstr &UseMI) {
813 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
814 "shouldn't consider debug uses");
815 assert(DefMI.getParent() == UseMI.getParent());
816 if (&DefMI == &UseMI)
817 return true;
818 const MachineBasicBlock &MBB = *DefMI.getParent();
819 auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
820 return &MI == &DefMI || &MI == &UseMI;
821 });
822 if (DefOrUse == MBB.end())
823 llvm_unreachable("Block must contain both DefMI and UseMI!");
824 return &*DefOrUse == &DefMI;
825}
826
828 const MachineInstr &UseMI) {
829 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
830 "shouldn't consider debug uses");
831 if (MDT)
832 return MDT->dominates(&DefMI, &UseMI);
833 else if (DefMI.getParent() != UseMI.getParent())
834 return false;
835
836 return isPredecessor(DefMI, UseMI);
837}
838
840 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
841 Register SrcReg = MI.getOperand(1).getReg();
842 Register LoadUser = SrcReg;
843
844 if (MRI.getType(SrcReg).isVector())
845 return false;
846
847 Register TruncSrc;
848 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
849 LoadUser = TruncSrc;
850
851 uint64_t SizeInBits = MI.getOperand(2).getImm();
852 // If the source is a G_SEXTLOAD from the same bit width, then we don't
853 // need any extend at all, just a truncate.
854 if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
855 // If truncating more than the original extended value, abort.
856 auto LoadSizeBits = LoadMI->getMemSizeInBits();
857 if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
858 return false;
859 if (LoadSizeBits == SizeInBits)
860 return true;
861 }
862 return false;
863}
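// For illustration, the redundant-extension case matched above (a sketch):
//   %ld:_(s32) = G_SEXTLOAD %ptr(p0) :: (load (s8))
//   %ext:_(s32) = G_SEXT_INREG %ld, 8
// The load already sign-extended from 8 bits, so the apply below replaces the
// G_SEXT_INREG with a plain copy of %ld.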
864
866 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
868 Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
869 MI.eraseFromParent();
870}
871
873 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
874 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
875
876 Register DstReg = MI.getOperand(0).getReg();
877 LLT RegTy = MRI.getType(DstReg);
878
879 // Only supports scalars for now.
880 if (RegTy.isVector())
881 return false;
882
883 Register SrcReg = MI.getOperand(1).getReg();
884 auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
885 if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
886 return false;
887
888 uint64_t MemBits = LoadDef->getMemSizeInBits();
889
890 // If the sign extend extends from a narrower width than the load's width,
891 // then we can narrow the load width when we combine to a G_SEXTLOAD.
892 // Avoid widening the load at all.
893 unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);
894
895 // Don't generate G_SEXTLOADs with a < 1 byte width.
896 if (NewSizeBits < 8)
897 return false;
898 // Don't bother creating a non-power-of-2 sextload; it will likely be broken up
899 // anyway for most targets.
900 if (!isPowerOf2_32(NewSizeBits))
901 return false;
902
903 const MachineMemOperand &MMO = LoadDef->getMMO();
904 LegalityQuery::MemDesc MMDesc(MMO);
905
906 // Don't modify the memory access size if this is atomic/volatile, but we can
907 // still adjust the opcode to indicate the high bit behavior.
908 if (LoadDef->isSimple())
909 MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
910 else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
911 return false;
912
913 // TODO: Could check if it's legal with the reduced or original memory size.
914 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
915 {MRI.getType(LoadDef->getDstReg()),
916 MRI.getType(LoadDef->getPointerReg())},
917 {MMDesc}}))
918 return false;
919
920 MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
921 return true;
922}
923
925 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
926 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
927 Register LoadReg;
928 unsigned ScalarSizeBits;
929 std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
930 GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));
931
932 // If we have the following:
933 // %ld = G_LOAD %ptr, (load 2)
934 // %ext = G_SEXT_INREG %ld, 8
935 // ==>
936 // %ld = G_SEXTLOAD %ptr (load 1)
937
938 auto &MMO = LoadDef->getMMO();
940 auto &MF = Builder.getMF();
941 auto PtrInfo = MMO.getPointerInfo();
942 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
943 Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
944 LoadDef->getPointerReg(), *NewMMO);
945 MI.eraseFromParent();
946}
947
948static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
949 if (Ty.isVector())
950 return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
951 Ty.getNumElements());
952 return IntegerType::get(C, Ty.getSizeInBits());
953}
954
955/// Return true if 'MI' is a load or a store that may fold its address
956/// operand into the load / store addressing mode.
957static bool canFoldInAddressingMode(GLoadStore *MI, const TargetLowering &TLI,
958 MachineRegisterInfo &MRI) {
959 TargetLowering::AddrMode AM;
960 auto *MF = MI->getMF();
961 auto *Addr = getOpcodeDef<GPtrAdd>(MI->getPointerReg(), MRI);
962 if (!Addr)
963 return false;
964
965 AM.HasBaseReg = true;
966 if (auto CstOff = getIConstantVRegVal(Addr->getOffsetReg(), MRI))
967 AM.BaseOffs = CstOff->getSExtValue(); // [reg +/- imm]
968 else
969 AM.Scale = 1; // [reg +/- reg]
970
971 return TLI.isLegalAddressingMode(
972 MF->getDataLayout(), AM,
973 getTypeForLLT(MI->getMMO().getMemoryType(),
974 MF->getFunction().getContext()),
975 MI->getMMO().getAddrSpace());
976}
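// For example (illustrative): when the address is (G_PTR_ADD %base,
// G_CONSTANT 16), the helper above asks the target about a [reg + 16]
// addressing mode; with a variable offset it asks about a [reg + reg] form.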
977
978static unsigned getIndexedOpc(unsigned LdStOpc) {
979 switch (LdStOpc) {
980 case TargetOpcode::G_LOAD:
981 return TargetOpcode::G_INDEXED_LOAD;
982 case TargetOpcode::G_STORE:
983 return TargetOpcode::G_INDEXED_STORE;
984 case TargetOpcode::G_ZEXTLOAD:
985 return TargetOpcode::G_INDEXED_ZEXTLOAD;
986 case TargetOpcode::G_SEXTLOAD:
987 return TargetOpcode::G_INDEXED_SEXTLOAD;
988 default:
989 llvm_unreachable("Unexpected opcode");
990 }
991}
992
993bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const {
994 // Check for legality.
995 LLT PtrTy = MRI.getType(LdSt.getPointerReg());
996 LLT Ty = MRI.getType(LdSt.getReg(0));
997 LLT MemTy = LdSt.getMMO().getMemoryType();
998 SmallVector<LegalityQuery::MemDesc, 2> MemDescrs(
999 {{MemTy, MemTy.getSizeInBits(), AtomicOrdering::NotAtomic}});
1000 unsigned IndexedOpc = getIndexedOpc(LdSt.getOpcode());
1001 SmallVector<LLT> OpTys;
1002 if (IndexedOpc == TargetOpcode::G_INDEXED_STORE)
1003 OpTys = {PtrTy, Ty, Ty};
1004 else
1005 OpTys = {Ty, PtrTy}; // For G_INDEXED_LOAD, G_INDEXED_[SZ]EXTLOAD
1006
1007 LegalityQuery Q(IndexedOpc, OpTys, MemDescrs);
1008 return isLegal(Q);
1009}
1010
1012 "post-index-use-threshold", cl::Hidden, cl::init(32),
1013 cl::desc("Number of uses of a base pointer to check before it is no longer "
1014 "considered for post-indexing."));
1015
1016bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr,
1017 Register &Base, Register &Offset,
1018 bool &RematOffset) {
1019 // We're looking for the following pattern, for either load or store:
1020 // %baseptr:_(p0) = ...
1021 // G_STORE %val(s64), %baseptr(p0)
1022 // %offset:_(s64) = G_CONSTANT i64 -256
1023 // %new_addr:_(p0) = G_PTR_ADD %baseptr, %offset(s64)
1024 const auto &TLI = getTargetLowering();
1025
1026 Register Ptr = LdSt.getPointerReg();
1027 // If the store is the only use, don't bother.
1028 if (MRI.hasOneNonDBGUse(Ptr))
1029 return false;
1030
1031 if (!isIndexedLoadStoreLegal(LdSt))
1032 return false;
1033
1034 if (getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Ptr, MRI))
1035 return false;
1036
1037 MachineInstr *StoredValDef = getDefIgnoringCopies(LdSt.getReg(0), MRI);
1038 auto *PtrDef = MRI.getVRegDef(Ptr);
1039
1040 unsigned NumUsesChecked = 0;
1041 for (auto &Use : MRI.use_nodbg_instructions(Ptr)) {
1042 if (++NumUsesChecked > PostIndexUseThreshold)
1043 return false; // Try to avoid exploding compile time.
1044
1045 auto *PtrAdd = dyn_cast<GPtrAdd>(&Use);
1046 // The use itself might be dead. This can happen during combines if DCE
1047 // hasn't had a chance to run yet. Don't allow it to form an indexed op.
1048 if (!PtrAdd || MRI.use_nodbg_empty(PtrAdd->getReg(0)))
1049 continue;
1050
1051 // Check that the user of this isn't the store; otherwise we'd be generating
1052 // an indexed store that defines its own use.
1053 if (StoredValDef == &Use)
1054 continue;
1055
1056 Offset = PtrAdd->getOffsetReg();
1057 if (!ForceLegalIndexing &&
1058 !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset,
1059 /*IsPre*/ false, MRI))
1060 continue;
1061
1062 // Make sure the offset calculation is before the potentially indexed op.
1063 MachineInstr *OffsetDef = MRI.getVRegDef(Offset);
1064 RematOffset = false;
1065 if (!dominates(*OffsetDef, LdSt)) {
1066 // If the offset however is just a G_CONSTANT, we can always just
1067 // rematerialize it where we need it.
1068 if (OffsetDef->getOpcode() != TargetOpcode::G_CONSTANT)
1069 continue;
1070 RematOffset = true;
1071 }
1072
1073 for (auto &BasePtrUse : MRI.use_nodbg_instructions(PtrAdd->getBaseReg())) {
1074 if (&BasePtrUse == PtrDef)
1075 continue;
1076
1077 // If the user is a later load/store that can be post-indexed, then don't
1078 // combine this one.
1079 auto *BasePtrLdSt = dyn_cast<GLoadStore>(&BasePtrUse);
1080 if (BasePtrLdSt && BasePtrLdSt != &LdSt &&
1081 dominates(LdSt, *BasePtrLdSt) &&
1082 isIndexedLoadStoreLegal(*BasePtrLdSt))
1083 return false;
1084
1085 // Now we're looking for the key G_PTR_ADD instruction, which contains
1086 // the offset add that we want to fold.
1087 if (auto *BasePtrUseDef = dyn_cast<GPtrAdd>(&BasePtrUse)) {
1088 Register PtrAddDefReg = BasePtrUseDef->getReg(0);
1089 for (auto &BaseUseUse : MRI.use_nodbg_instructions(PtrAddDefReg)) {
1090 // If the use is in a different block, then we may produce worse code
1091 // due to the extra register pressure.
1092 if (BaseUseUse.getParent() != LdSt.getParent())
1093 return false;
1094
1095 if (auto *UseUseLdSt = dyn_cast<GLoadStore>(&BaseUseUse))
1096 if (canFoldInAddressingMode(UseUseLdSt, TLI, MRI))
1097 return false;
1098 }
1099 if (!dominates(LdSt, BasePtrUse))
1100 return false; // All uses must be dominated by the load/store.
1101 }
1102 }
1103
1104 Addr = PtrAdd->getReg(0);
1105 Base = PtrAdd->getBaseReg();
1106 return true;
1107 }
1108
1109 return false;
1110}
1111
1112bool CombinerHelper::findPreIndexCandidate(GLoadStore &LdSt, Register &Addr,
1113 Register &Base, Register &Offset) {
1114 auto &MF = *LdSt.getParent()->getParent();
1115 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1116
1117 Addr = LdSt.getPointerReg();
1120 return false;
1121
1122 if (!ForceLegalIndexing &&
1123 !TLI.isIndexingLegal(LdSt, Base, Offset, /*IsPre*/ true, MRI))
1124 return false;
1125
1126 if (!isIndexedLoadStoreLegal(LdSt))
1127 return false;
1128
1130 if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
1131 return false;
1132
1133 if (auto *St = dyn_cast<GStore>(&LdSt)) {
1134 // Would require a copy.
1135 if (Base == St->getValueReg())
1136 return false;
1137
1138 // We're expecting one use of Addr in MI, but it could also be the
1139 // value stored, which isn't actually dominated by the instruction.
1140 if (St->getValueReg() == Addr)
1141 return false;
1142 }
1143
1144 // Avoid increasing cross-block register pressure.
1145 for (auto &AddrUse : MRI.use_nodbg_instructions(Addr))
1146 if (AddrUse.getParent() != LdSt.getParent())
1147 return false;
1148
1149 // FIXME: check whether all uses of the base pointer are constant PtrAdds.
1150 // That might allow us to end base's liveness here by adjusting the constant.
1151 bool RealUse = false;
1152 for (auto &AddrUse : MRI.use_nodbg_instructions(Addr)) {
1153 if (!dominates(LdSt, AddrUse))
1154 return false; // All uses must be dominated by the load/store.
1155
1156 // If Ptr may be folded into the addressing mode of another use, then it's
1157 // not profitable to do this transformation.
1158 if (auto *UseLdSt = dyn_cast<GLoadStore>(&AddrUse)) {
1159 if (!canFoldInAddressingMode(UseLdSt, TLI, MRI))
1160 RealUse = true;
1161 } else {
1162 RealUse = true;
1163 }
1164 }
1165 return RealUse;
1166}
1167
1169 BuildFnTy &MatchInfo) {
1170 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
1171
1172 // Check if there is a load that defines the vector being extracted from.
1173 auto *LoadMI = getOpcodeDef<GLoad>(MI.getOperand(1).getReg(), MRI);
1174 if (!LoadMI)
1175 return false;
1176
1177 Register Vector = MI.getOperand(1).getReg();
1178 LLT VecEltTy = MRI.getType(Vector).getElementType();
1179
1180 assert(MRI.getType(MI.getOperand(0).getReg()) == VecEltTy);
1181
1182 // Checking whether we should reduce the load width.
1184 return false;
1185
1186 // Check if the defining load is simple.
1187 if (!LoadMI->isSimple())
1188 return false;
1189
1190 // If the vector element type is not a multiple of a byte then we are unable
1191 // to correctly compute an address to load only the extracted element as a
1192 // scalar.
1193 if (!VecEltTy.isByteSized())
1194 return false;
1195
1196 // Check if the new load that we are going to create is legal
1197 // if we are in the post-legalization phase.
1198 MachineMemOperand MMO = LoadMI->getMMO();
1199 Align Alignment = MMO.getAlign();
1200 MachinePointerInfo PtrInfo;
1202
1203 // Finding the appropriate PtrInfo if offset is a known constant.
1204 // This is required to create the memory operand for the narrowed load.
1205 // This machine memory operand object helps us determine legality
1206 // before we proceed to combine the instruction.
1207 if (auto CVal = getIConstantVRegVal(Vector, MRI)) {
1208 int Elt = CVal->getZExtValue();
1209 // FIXME: should be (ABI size)*Elt.
1210 Offset = VecEltTy.getSizeInBits() * Elt / 8;
1211 PtrInfo = MMO.getPointerInfo().getWithOffset(Offset);
1212 } else {
1213 // Discard the pointer info except the address space because the memory
1214 // operand can't represent this new access since the offset is variable.
1215 Offset = VecEltTy.getSizeInBits() / 8;
1217 }
1218
1219 Alignment = commonAlignment(Alignment, Offset);
1220
1221 Register VecPtr = LoadMI->getPointerReg();
1222 LLT PtrTy = MRI.getType(VecPtr);
1223
1224 MachineFunction &MF = *MI.getMF();
1225 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, VecEltTy);
1226
1227 LegalityQuery::MemDesc MMDesc(*NewMMO);
1228
1229 LegalityQuery Q = {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}};
1230
1231 if (!isLegalOrBeforeLegalizer(Q))
1232 return false;
1233
1234 // Load must be allowed and fast on the target.
1236 auto &DL = MF.getDataLayout();
1237 unsigned Fast = 0;
1238 if (!getTargetLowering().allowsMemoryAccess(C, DL, VecEltTy, *NewMMO,
1239 &Fast) ||
1240 !Fast)
1241 return false;
1242
1243 Register Result = MI.getOperand(0).getReg();
1244 Register Index = MI.getOperand(2).getReg();
1245
1246 MatchInfo = [=](MachineIRBuilder &B) {
1247 GISelObserverWrapper DummyObserver;
1248 LegalizerHelper Helper(B.getMF(), DummyObserver, B);
1249 // Get pointer to the vector element.
1250 Register finalPtr = Helper.getVectorElementPointer(
1251 LoadMI->getPointerReg(), MRI.getType(LoadMI->getOperand(0).getReg()),
1252 Index);
1253 // New G_LOAD instruction.
1254 B.buildLoad(Result, finalPtr, PtrInfo, Alignment);
1255 // Remove the original G_LOAD instruction.
1256 LoadMI->eraseFromParent();
1257 };
1258
1259 return true;
1260}
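// For illustration, the narrowing built by the MatchInfo above (a sketch):
//   %v:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
//   %e:_(s32) = G_EXTRACT_VECTOR_ELT %v(<4 x s32>), %idx(s64)
// becomes a single scalar load from the element's address:
//   %eptr:_(p0) = ... pointer to element %idx of %ptr ...
//   %e:_(s32) = G_LOAD %eptr(p0) :: (load (s32))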
1261
1264 auto &LdSt = cast<GLoadStore>(MI);
1265
1266 if (LdSt.isAtomic())
1267 return false;
1268
1269 MatchInfo.IsPre = findPreIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,
1270 MatchInfo.Offset);
1271 if (!MatchInfo.IsPre &&
1272 !findPostIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,
1273 MatchInfo.Offset, MatchInfo.RematOffset))
1274 return false;
1275
1276 return true;
1277}
1278
1281 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
1283 unsigned Opcode = MI.getOpcode();
1284 bool IsStore = Opcode == TargetOpcode::G_STORE;
1285 unsigned NewOpcode = getIndexedOpc(Opcode);
1286
1287 // If the offset constant didn't happen to dominate the load/store, we can
1288 // just clone it as needed.
1289 if (MatchInfo.RematOffset) {
1290 auto *OldCst = MRI.getVRegDef(MatchInfo.Offset);
1291 auto NewCst = Builder.buildConstant(MRI.getType(MatchInfo.Offset),
1292 *OldCst->getOperand(1).getCImm());
1293 MatchInfo.Offset = NewCst.getReg(0);
1294 }
1295
1296 auto MIB = Builder.buildInstr(NewOpcode);
1297 if (IsStore) {
1298 MIB.addDef(MatchInfo.Addr);
1299 MIB.addUse(MI.getOperand(0).getReg());
1300 } else {
1301 MIB.addDef(MI.getOperand(0).getReg());
1302 MIB.addDef(MatchInfo.Addr);
1303 }
1304
1305 MIB.addUse(MatchInfo.Base);
1306 MIB.addUse(MatchInfo.Offset);
1307 MIB.addImm(MatchInfo.IsPre);
1308 MIB->cloneMemRefs(*MI.getMF(), MI);
1309 MI.eraseFromParent();
1310 AddrDef.eraseFromParent();
1311
1312 LLVM_DEBUG(dbgs() << " Combined to indexed operation");
1313}
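// For illustration, a post-indexed store formed by the code above (a sketch):
//   G_STORE %val(s64), %base(p0)
//   %off:_(s64) = G_CONSTANT i64 -256
//   %new:_(p0) = G_PTR_ADD %base, %off(s64)
// becomes:
//   %new:_(p0) = G_INDEXED_STORE %val(s64), %base(p0), %off(s64), 0
// where the trailing 0 means post-indexed (1 would mean pre-indexed).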
1314
1316 MachineInstr *&OtherMI) {
1317 unsigned Opcode = MI.getOpcode();
1318 bool IsDiv, IsSigned;
1319
1320 switch (Opcode) {
1321 default:
1322 llvm_unreachable("Unexpected opcode!");
1323 case TargetOpcode::G_SDIV:
1324 case TargetOpcode::G_UDIV: {
1325 IsDiv = true;
1326 IsSigned = Opcode == TargetOpcode::G_SDIV;
1327 break;
1328 }
1329 case TargetOpcode::G_SREM:
1330 case TargetOpcode::G_UREM: {
1331 IsDiv = false;
1332 IsSigned = Opcode == TargetOpcode::G_SREM;
1333 break;
1334 }
1335 }
1336
1337 Register Src1 = MI.getOperand(1).getReg();
1338 unsigned DivOpcode, RemOpcode, DivremOpcode;
1339 if (IsSigned) {
1340 DivOpcode = TargetOpcode::G_SDIV;
1341 RemOpcode = TargetOpcode::G_SREM;
1342 DivremOpcode = TargetOpcode::G_SDIVREM;
1343 } else {
1344 DivOpcode = TargetOpcode::G_UDIV;
1345 RemOpcode = TargetOpcode::G_UREM;
1346 DivremOpcode = TargetOpcode::G_UDIVREM;
1347 }
1348
1349 if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
1350 return false;
1351
1352 // Combine:
1353 // %div:_ = G_[SU]DIV %src1:_, %src2:_
1354 // %rem:_ = G_[SU]REM %src1:_, %src2:_
1355 // into:
1356 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1357
1358 // Combine:
1359 // %rem:_ = G_[SU]REM %src1:_, %src2:_
1360 // %div:_ = G_[SU]DIV %src1:_, %src2:_
1361 // into:
1362 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
1363
1364 for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
1365 if (MI.getParent() == UseMI.getParent() &&
1366 ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
1367 (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
1368 matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
1369 matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
1370 OtherMI = &UseMI;
1371 return true;
1372 }
1373 }
1374
1375 return false;
1376}
1377
1379 MachineInstr *&OtherMI) {
1380 unsigned Opcode = MI.getOpcode();
1381 assert(OtherMI && "OtherMI shouldn't be empty.");
1382
1383 Register DestDivReg, DestRemReg;
1384 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
1385 DestDivReg = MI.getOperand(0).getReg();
1386 DestRemReg = OtherMI->getOperand(0).getReg();
1387 } else {
1388 DestDivReg = OtherMI->getOperand(0).getReg();
1389 DestRemReg = MI.getOperand(0).getReg();
1390 }
1391
1392 bool IsSigned =
1393 Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;
1394
1395 // Check which instruction is first in the block so we don't break def-use
1396 // deps by "moving" the instruction incorrectly. Also keep track of which
1397 // instruction is first so we pick its operands, avoiding use-before-def
1398 // bugs.
1399 MachineInstr *FirstInst;
1400 if (dominates(MI, *OtherMI)) {
1402 FirstInst = &MI;
1403 } else {
1404 Builder.setInstrAndDebugLoc(*OtherMI);
1405 FirstInst = OtherMI;
1406 }
1407
1408 Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
1409 : TargetOpcode::G_UDIVREM,
1410 {DestDivReg, DestRemReg},
1411 { FirstInst->getOperand(1), FirstInst->getOperand(2) });
1412 MI.eraseFromParent();
1413 OtherMI->eraseFromParent();
1414}
1415
1417 MachineInstr *&BrCond) {
1418 assert(MI.getOpcode() == TargetOpcode::G_BR);
1419
1420 // Try to match the following:
1421 // bb1:
1422 // G_BRCOND %c1, %bb2
1423 // G_BR %bb3
1424 // bb2:
1425 // ...
1426 // bb3:
1427
1428 // The above pattern does not have a fall through to the successor bb2, always
1429 // resulting in a branch no matter which path is taken. Here we try to find
1430 // and replace that pattern with a conditional branch to bb3 and otherwise a
1431 // fallthrough to bb2. This is generally better for branch predictors.
1432
1433 MachineBasicBlock *MBB = MI.getParent();
1435 if (BrIt == MBB->begin())
1436 return false;
1437 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
1438
1439 BrCond = &*std::prev(BrIt);
1440 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
1441 return false;
1442
1443 // Check that the next block is the conditional branch target. Also make sure
1444 // that it isn't the same as the G_BR's target (otherwise, this will loop.)
1445 MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
1446 return BrCondTarget != MI.getOperand(0).getMBB() &&
1447 MBB->isLayoutSuccessor(BrCondTarget);
1448}
1449
1451 MachineInstr *&BrCond) {
1452 MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
1454 LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
1455 // FIXME: Does int/fp matter for this? If so, we might need to restrict
1456 // this to i1 only since we might not know for sure what kind of
1457 // compare generated the condition value.
1458 auto True = Builder.buildConstant(
1459 Ty, getICmpTrueVal(getTargetLowering(), false, false));
1460 auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
1461
1462 auto *FallthroughBB = BrCond->getOperand(1).getMBB();
1464 MI.getOperand(0).setMBB(FallthroughBB);
1466
1467 // Change the conditional branch to use the inverted condition and
1468 // new target block.
1469 Observer.changingInstr(*BrCond);
1470 BrCond->getOperand(0).setReg(Xor.getReg(0));
1471 BrCond->getOperand(1).setMBB(BrTarget);
1472 Observer.changedInstr(*BrCond);
1473}
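// For illustration, after the rewrite above bb1 ends as (a sketch):
//   %inv:_(s1) = G_XOR %c1, %true
//   G_BRCOND %inv(s1), %bb3
//   G_BR %bb2
// so the common path can fall through to the layout successor bb2 once the
// now-redundant unconditional branch is cleaned up.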
1474
1475
1477 MachineIRBuilder HelperBuilder(MI);
1478 GISelObserverWrapper DummyObserver;
1479 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1480 return Helper.lowerMemcpyInline(MI) ==
1482}
1483
1485 MachineIRBuilder HelperBuilder(MI);
1486 GISelObserverWrapper DummyObserver;
1487 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
1488 return Helper.lowerMemCpyFamily(MI, MaxLen) ==
1490}
1491
1493 const MachineRegisterInfo &MRI,
1494 const APFloat &Val) {
1495 APFloat Result(Val);
1496 switch (MI.getOpcode()) {
1497 default:
1498 llvm_unreachable("Unexpected opcode!");
1499 case TargetOpcode::G_FNEG: {
1500 Result.changeSign();
1501 return Result;
1502 }
1503 case TargetOpcode::G_FABS: {
1504 Result.clearSign();
1505 return Result;
1506 }
1507 case TargetOpcode::G_FPTRUNC: {
1508 bool Unused;
1509 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1511 &Unused);
1512 return Result;
1513 }
1514 case TargetOpcode::G_FSQRT: {
1515 bool Unused;
1517 &Unused);
1518 Result = APFloat(sqrt(Result.convertToDouble()));
1519 break;
1520 }
1521 case TargetOpcode::G_FLOG2: {
1522 bool Unused;
1524 &Unused);
1525 Result = APFloat(log2(Result.convertToDouble()));
1526 break;
1527 }
1528 }
1529 // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
1530 // `buildFConstant` will assert on size mismatch. Only `G_FSQRT`, and
1531 // `G_FLOG2` reach here.
1532 bool Unused;
1533 Result.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &Unused);
1534 return Result;
1535}
1536
1538 const ConstantFP *Cst) {
1540 APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue());
1541 const ConstantFP *NewCst = ConstantFP::get(Builder.getContext(), Folded);
1542 Builder.buildFConstant(MI.getOperand(0), *NewCst);
1543 MI.eraseFromParent();
1544}
1545
1547 PtrAddChain &MatchInfo) {
1548 // We're trying to match the following pattern:
1549 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1550 // %root = G_PTR_ADD %t1, G_CONSTANT imm2
1551 // -->
1552 // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1553
1554 if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1555 return false;
1556
1557 Register Add2 = MI.getOperand(1).getReg();
1558 Register Imm1 = MI.getOperand(2).getReg();
1559 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1560 if (!MaybeImmVal)
1561 return false;
1562
1563 MachineInstr *Add2Def = MRI.getVRegDef(Add2);
1564 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1565 return false;
1566
1567 Register Base = Add2Def->getOperand(1).getReg();
1568 Register Imm2 = Add2Def->getOperand(2).getReg();
1569 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1570 if (!MaybeImm2Val)
1571 return false;
1572
1573 // Check if the new combined immediate forms an illegal addressing mode.
1574 // Do not combine if it was legal before but would become illegal.
1575 // To do so, we need to find a load/store user of the pointer to get
1576 // the access type.
1577 Type *AccessTy = nullptr;
1578 auto &MF = *MI.getMF();
1579 for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
1580 if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
1581 AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
1582 MF.getFunction().getContext());
1583 break;
1584 }
1585 }
1587 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
1588 AMNew.BaseOffs = CombinedImm.getSExtValue();
1589 if (AccessTy) {
1590 AMNew.HasBaseReg = true;
1592 AMOld.BaseOffs = MaybeImmVal->Value.getSExtValue();
1593 AMOld.HasBaseReg = true;
1594 unsigned AS = MRI.getType(Add2).getAddressSpace();
1595 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1596 if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
1597 !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
1598 return false;
1599 }
1600
1601 // Pass the combined immediate to the apply function.
1602 MatchInfo.Imm = AMNew.BaseOffs;
1603 MatchInfo.Base = Base;
1604 MatchInfo.Bank = getRegBank(Imm2);
1605 return true;
1606}
1607
1609 PtrAddChain &MatchInfo) {
1610 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1611 MachineIRBuilder MIB(MI);
1612 LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1613 auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1614 setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
1616 MI.getOperand(1).setReg(MatchInfo.Base);
1617 MI.getOperand(2).setReg(NewOffset.getReg(0));
1619}
1620
1622 RegisterImmPair &MatchInfo) {
1623 // We're trying to match the following pattern with any of
1624 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1625 // %t1 = SHIFT %base, G_CONSTANT imm1
1626 // %root = SHIFT %t1, G_CONSTANT imm2
1627 // -->
1628 // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1629
1630 unsigned Opcode = MI.getOpcode();
1631 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1632 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1633 Opcode == TargetOpcode::G_USHLSAT) &&
1634 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1635
1636 Register Shl2 = MI.getOperand(1).getReg();
1637 Register Imm1 = MI.getOperand(2).getReg();
1638 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
1639 if (!MaybeImmVal)
1640 return false;
1641
1642 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1643 if (Shl2Def->getOpcode() != Opcode)
1644 return false;
1645
1646 Register Base = Shl2Def->getOperand(1).getReg();
1647 Register Imm2 = Shl2Def->getOperand(2).getReg();
1648 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
1649 if (!MaybeImm2Val)
1650 return false;
1651
1652 // Pass the combined immediate to the apply function.
1653 MatchInfo.Imm =
1654 (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue();
1655 MatchInfo.Reg = Base;
1656
1657 // There is no simple replacement for a saturating unsigned left shift that
1658 // exceeds the scalar size.
1659 if (Opcode == TargetOpcode::G_USHLSAT &&
1660 MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1661 return false;
1662
1663 return true;
1664}
1665
1667 RegisterImmPair &MatchInfo) {
1668 unsigned Opcode = MI.getOpcode();
1669 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1670 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1671 Opcode == TargetOpcode::G_USHLSAT) &&
1672 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1673
1675 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1676 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1677 auto Imm = MatchInfo.Imm;
1678
1679 if (Imm >= ScalarSizeInBits) {
1680 // Any logical shift that exceeds scalar size will produce zero.
1681 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1682 Builder.buildConstant(MI.getOperand(0), 0);
1683 MI.eraseFromParent();
1684 return;
1685 }
1686 // Arithmetic shift and saturating signed left shift have no effect beyond
1687 // scalar size.
1688 Imm = ScalarSizeInBits - 1;
1689 }
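 // E.g. (illustrative) folding (ashr (ashr x, 20), 20) on s32 gives Imm == 40
 // above, which is clamped to 31 here; the equivalent (lshr (lshr x, 20), 20)
 // folds to the constant zero instead.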
1690
1691 LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1692 Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1694 MI.getOperand(1).setReg(MatchInfo.Reg);
1695 MI.getOperand(2).setReg(NewImm);
1697}
1698
1700 ShiftOfShiftedLogic &MatchInfo) {
1701 // We're trying to match the following pattern with any of
1702 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1703 // with any of G_AND/G_OR/G_XOR logic instructions.
1704 // %t1 = SHIFT %X, G_CONSTANT C0
1705 // %t2 = LOGIC %t1, %Y
1706 // %root = SHIFT %t2, G_CONSTANT C1
1707 // -->
1708 // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1709 // %t4 = SHIFT %Y, G_CONSTANT C1
1710 // %root = LOGIC %t3, %t4
1711 unsigned ShiftOpcode = MI.getOpcode();
1712 assert((ShiftOpcode == TargetOpcode::G_SHL ||
1713 ShiftOpcode == TargetOpcode::G_ASHR ||
1714 ShiftOpcode == TargetOpcode::G_LSHR ||
1715 ShiftOpcode == TargetOpcode::G_USHLSAT ||
1716 ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1717 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1718
1719 // Match a one-use bitwise logic op.
1720 Register LogicDest = MI.getOperand(1).getReg();
1721 if (!MRI.hasOneNonDBGUse(LogicDest))
1722 return false;
1723
1724 MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1725 unsigned LogicOpcode = LogicMI->getOpcode();
1726 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1727 LogicOpcode != TargetOpcode::G_XOR)
1728 return false;
1729
1730 // Find a matching one-use shift by constant.
1731 const Register C1 = MI.getOperand(2).getReg();
1732 auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
1733 if (!MaybeImmVal || MaybeImmVal->Value == 0)
1734 return false;
1735
1736 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1737
1738 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1740 // Shift should match the previous one and must have only one use.
1740 if (MI->getOpcode() != ShiftOpcode ||
1741 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1742 return false;
1743
1744 // Must be a constant.
1745 auto MaybeImmVal =
1746 getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1747 if (!MaybeImmVal)
1748 return false;
1749
1750 ShiftVal = MaybeImmVal->Value.getSExtValue();
1751 return true;
1752 };
1753
1754 // Logic ops are commutative, so check each operand for a match.
1755 Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1756 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1757 Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1758 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1759 uint64_t C0Val;
1760
1761 if (matchFirstShift(LogicMIOp1, C0Val)) {
1762 MatchInfo.LogicNonShiftReg = LogicMIReg2;
1763 MatchInfo.Shift2 = LogicMIOp1;
1764 } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1765 MatchInfo.LogicNonShiftReg = LogicMIReg1;
1766 MatchInfo.Shift2 = LogicMIOp2;
1767 } else
1768 return false;
1769
1770 MatchInfo.ValSum = C0Val + C1Val;
1771
1772 // The fold is not valid if the sum of the shift amounts reaches or exceeds the bitwidth.
1773 if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1774 return false;
1775
1776 MatchInfo.Logic = LogicMI;
1777 return true;
1778}
1779
1781 ShiftOfShiftedLogic &MatchInfo) {
1782 unsigned Opcode = MI.getOpcode();
1783 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1784 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1785 Opcode == TargetOpcode::G_SSHLSAT) &&
1786 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1787
1788 LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1789 LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1791
1792 Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1793
1794 Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1795 Register Shift1 =
1796 Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1797
1798 // If LogicNonShiftReg is the same as Shift1Base and the shift1 constant is
1799 // the same as the MatchInfo.Shift2 constant, CSEMIRBuilder will reuse the old
1800 // shift1 when building shift2. In that case, erasing MatchInfo.Shift2 at the
1801 // end would actually remove the old shift1 and lead to a crash later, so
1802 // erase it earlier to avoid that.
1803 MatchInfo.Shift2->eraseFromParent();
1804
1805 Register Shift2Const = MI.getOperand(2).getReg();
1806 Register Shift2 = Builder
1807 .buildInstr(Opcode, {DestType},
1808 {MatchInfo.LogicNonShiftReg, Shift2Const})
1809 .getReg(0);
1810
1811 Register Dest = MI.getOperand(0).getReg();
1812 Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1813
1814 // This was one use so it's safe to remove it.
1815 MatchInfo.Logic->eraseFromParent();
1816
1817 MI.eraseFromParent();
1818}
1819
1821 assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
1822 // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1823 // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1824 auto &Shl = cast<GenericMachineInstr>(MI);
1825 Register DstReg = Shl.getReg(0);
1826 Register SrcReg = Shl.getReg(1);
1827 Register ShiftReg = Shl.getReg(2);
1828 Register X, C1;
1829
1830 if (!getTargetLowering().isDesirableToCommuteWithShift(MI, !isPreLegalize()))
1831 return false;
1832
1833 if (!mi_match(SrcReg, MRI,
1834 m_OneNonDBGUse(m_any_of(m_GAdd(m_Reg(X), m_Reg(C1)),
1835 m_GOr(m_Reg(X), m_Reg(C1))))))
1836 return false;
1837
1838 APInt C1Val, C2Val;
1839 if (!mi_match(C1, MRI, m_ICstOrSplat(C1Val)) ||
1840 !mi_match(ShiftReg, MRI, m_ICstOrSplat(C2Val)))
1841 return false;
1842
1843 auto *SrcDef = MRI.getVRegDef(SrcReg);
1844 assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
1845 SrcDef->getOpcode() == TargetOpcode::G_OR) && "Unexpected op");
1846 LLT SrcTy = MRI.getType(SrcReg);
1847 MatchInfo = [=](MachineIRBuilder &B) {
1848 auto S1 = B.buildShl(SrcTy, X, ShiftReg);
1849 auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
1850 B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
1851 };
1852 return true;
1853}
1854
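// Match a multiplication by a power-of-two constant so it can be rewritten
// as a left shift, e.g. (G_MUL %x, 8) --> (G_SHL %x, 3).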
1856 unsigned &ShiftVal) {
1857 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1858 auto MaybeImmVal =
1859 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1860 if (!MaybeImmVal)
1861 return false;
1862
1863 ShiftVal = MaybeImmVal->Value.exactLogBase2();
1864 return (static_cast<int32_t>(ShiftVal) != -1);
1865}
1866
1868 unsigned &ShiftVal) {
1869 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1870 MachineIRBuilder MIB(MI);
1871 LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1872 auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1874 MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1875 MI.getOperand(2).setReg(ShiftCst.getReg(0));
1877}
1878
1879// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1881 RegisterImmPair &MatchData) {
1882 assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1883 if (!getTargetLowering().isDesirableToPullExtFromShl(MI))
1884 return false;
1885
1886 Register LHS = MI.getOperand(1).getReg();
1887
1888 Register ExtSrc;
1889 if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1890 !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1891 !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1892 return false;
1893
1894 Register RHS = MI.getOperand(2).getReg();
1895 MachineInstr *MIShiftAmt = MRI.getVRegDef(RHS);
1896 auto MaybeShiftAmtVal = isConstantOrConstantSplatVector(*MIShiftAmt, MRI);
1897 if (!MaybeShiftAmtVal)
1898 return false;
1899
1900 if (LI) {
1901 LLT SrcTy = MRI.getType(ExtSrc);
1902
1903 // We only really care about the legality with the shifted value. We can
1904 // pick any type for the constant shift amount, so ask the target what to
1905 // use. Otherwise we would have to guess and hope it is reported as legal.
1906 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1907 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1908 return false;
1909 }
1910
1911 int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
1912 MatchData.Reg = ExtSrc;
1913 MatchData.Imm = ShiftAmt;
1914
1915 unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
1916 unsigned SrcTySize = MRI.getType(ExtSrc).getScalarSizeInBits();
1917 return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
1918}
1919
1921 const RegisterImmPair &MatchData) {
1922 Register ExtSrcReg = MatchData.Reg;
1923 int64_t ShiftAmtVal = MatchData.Imm;
1924
1925 LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1927 auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1928 auto NarrowShift =
1929 Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1930 Builder.buildZExt(MI.getOperand(0), NarrowShift);
1931 MI.eraseFromParent();
1932}
1933
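// Fold a merge of all results of an unmerge back to the unmerged register:
//   %a, %b = G_UNMERGE_VALUES %x
//   %y = G_MERGE_VALUES %a, %b   -->   replace %y with %x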
1935 Register &MatchInfo) {
1936 GMerge &Merge = cast<GMerge>(MI);
1937 SmallVector<Register, 16> MergedValues;
1938 for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1939 MergedValues.emplace_back(Merge.getSourceReg(I));
1940
1941 auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1942 if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1943 return false;
1944
1945 for (unsigned I = 0; I < MergedValues.size(); ++I)
1946 if (MergedValues[I] != Unmerge->getReg(I))
1947 return false;
1948
1949 MatchInfo = Unmerge->getSourceReg();
1950 return true;
1951}
1952
1954 const MachineRegisterInfo &MRI) {
1955 while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1956 ;
1957
1958 return Reg;
1959}
1960
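// unmerge(merge %x, %y, %z) --> %x, %y, %z: forward the merge sources to the
// unmerge destinations, inserting casts (and copies for mismatched register
// banks) when the types do not match exactly.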
1963 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1964 "Expected an unmerge");
1965 auto &Unmerge = cast<GUnmerge>(MI);
1966 Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);
1967
1968 auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);
1969 if (!SrcInstr)
1970 return false;
1971
1972 // Check the source type of the merge.
1973 LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
1974 LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
1975 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1976 if (SrcMergeTy != Dst0Ty && !SameSize)
1977 return false;
1978 // They are the same now (modulo a bitcast).
1979 // We can collect all the src registers.
1980 for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
1981 Operands.push_back(SrcInstr->getSourceReg(Idx));
1982 return true;
1983}
1984
1987 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1988 "Expected an unmerge");
1989 assert((MI.getNumOperands() - 1 == Operands.size()) &&
1990 "Not enough operands to replace all defs");
1991 unsigned NumElems = MI.getNumOperands() - 1;
1992
1993 LLT SrcTy = MRI.getType(Operands[0]);
1994 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1995 bool CanReuseInputDirectly = DstTy == SrcTy;
1997 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1998 Register DstReg = MI.getOperand(Idx).getReg();
1999 Register SrcReg = Operands[Idx];
2000
2001 // This combine may run after RegBankSelect, so we need to be aware of
2002 // register banks.
2003 const auto &DstCB = MRI.getRegClassOrRegBank(DstReg);
2004 if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) {
2005 SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0);
2006 MRI.setRegClassOrRegBank(SrcReg, DstCB);
2007 }
2008
2009 if (CanReuseInputDirectly)
2010 replaceRegWith(MRI, DstReg, SrcReg);
2011 else
2012 Builder.buildCast(DstReg, SrcReg);
2013 }
2014 MI.eraseFromParent();
2015}
2016
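// unmerge(G_CONSTANT) --> one narrow constant per destination, taking
// successive slices of the wide value starting from the least significant
// bits.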
2018 SmallVectorImpl<APInt> &Csts) {
2019 unsigned SrcIdx = MI.getNumOperands() - 1;
2020 Register SrcReg = MI.getOperand(SrcIdx).getReg();
2021 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
2022 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
2023 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
2024 return false;
2025 // Break down the big constant in smaller ones.
2026 const MachineOperand &CstVal = SrcInstr->getOperand(1);
2027 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
2028 ? CstVal.getCImm()->getValue()
2029 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
2030
2031 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
2032 unsigned ShiftAmt = Dst0Ty.getSizeInBits();
2033 // Unmerge a constant.
2034 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
2035 Csts.emplace_back(Val.trunc(ShiftAmt));
2036 Val = Val.lshr(ShiftAmt);
2037 }
2038
2039 return true;
2040}
2041
2043 SmallVectorImpl<APInt> &Csts) {
2044 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2045 "Expected an unmerge");
2046 assert((MI.getNumOperands() - 1 == Csts.size()) &&
2047 "Not enough operands to replace all defs");
2048 unsigned NumElems = MI.getNumOperands() - 1;
2050 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2051 Register DstReg = MI.getOperand(Idx).getReg();
2052 Builder.buildConstant(DstReg, Csts[Idx]);
2053 }
2054
2055 MI.eraseFromParent();
2056}
2057
2059 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
2060 unsigned SrcIdx = MI.getNumOperands() - 1;
2061 Register SrcReg = MI.getOperand(SrcIdx).getReg();
2062 MatchInfo = [&MI](MachineIRBuilder &B) {
2063 unsigned NumElems = MI.getNumOperands() - 1;
2064 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
2065 Register DstReg = MI.getOperand(Idx).getReg();
2066 B.buildUndef(DstReg);
2067 }
2068 };
2069 return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
2070}
2071
2073 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2074 "Expected an unmerge");
2075 // Check that all the lanes are dead except the first one.
2076 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2077 if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
2078 return false;
2079 }
2080 return true;
2081}
2082
2085 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
2086 // Truncating a vector is going to truncate every single lane,
2087 // whereas we want the full low bits.
2088 // Do the operation on a scalar instead.
2089 LLT SrcTy = MRI.getType(SrcReg);
2090 if (SrcTy.isVector())
2091 SrcReg =
2092 Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
2093
2094 Register Dst0Reg = MI.getOperand(0).getReg();
2095 LLT Dst0Ty = MRI.getType(Dst0Reg);
2096 if (Dst0Ty.isVector()) {
2097 auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
2098 Builder.buildCast(Dst0Reg, MIB);
2099 } else
2100 Builder.buildTrunc(Dst0Reg, SrcReg);
2101 MI.eraseFromParent();
2102}
2103
2105 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2106 "Expected an unmerge");
2107 Register Dst0Reg = MI.getOperand(0).getReg();
2108 LLT Dst0Ty = MRI.getType(Dst0Reg);
2109 // G_ZEXT on vector applies to each lane, so it will
2110 // affect all destinations. Therefore we won't be able
2111 // to simplify the unmerge to just the first definition.
2112 if (Dst0Ty.isVector())
2113 return false;
2114 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
2115 LLT SrcTy = MRI.getType(SrcReg);
2116 if (SrcTy.isVector())
2117 return false;
2118
2119 Register ZExtSrcReg;
2120 if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
2121 return false;
2122
2123 // Finally we can replace the first definition with
2124 // a zext of the source if the definition is big enough to hold
2125 // all of ZExtSrc bits.
2126 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
2127 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
2128}
2129
2131 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2132 "Expected an unmerge");
2133
2134 Register Dst0Reg = MI.getOperand(0).getReg();
2135
2136 MachineInstr *ZExtInstr =
2137 MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
2138 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
2139 "Expecting a G_ZEXT");
2140
2141 Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
2142 LLT Dst0Ty = MRI.getType(Dst0Reg);
2143 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
2144
2146
2147 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
2148 Builder.buildZExt(Dst0Reg, ZExtSrcReg);
2149 } else {
2150 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
2151 "ZExt src doesn't fit in destination");
2152 replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
2153 }
2154
2155 Register ZeroReg;
2156 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2157 if (!ZeroReg)
2158 ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
2159 replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
2160 }
2161 MI.eraseFromParent();
2162}
2163
2165 unsigned TargetShiftSize,
2166 unsigned &ShiftVal) {
2167 assert((MI.getOpcode() == TargetOpcode::G_SHL ||
2168 MI.getOpcode() == TargetOpcode::G_LSHR ||
2169 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
2170
2171 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2172 if (Ty.isVector()) // TODO: Handle vector types.
2173 return false;
2174
2175 // Don't narrow further than the requested size.
2176 unsigned Size = Ty.getSizeInBits();
2177 if (Size <= TargetShiftSize)
2178 return false;
2179
2180 auto MaybeImmVal =
2181 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
2182 if (!MaybeImmVal)
2183 return false;
2184
2185 ShiftVal = MaybeImmVal->Value.getSExtValue();
2186 return ShiftVal >= Size / 2 && ShiftVal < Size;
2187}
2188
2190 const unsigned &ShiftVal) {
2191 Register DstReg = MI.getOperand(0).getReg();
2192 Register SrcReg = MI.getOperand(1).getReg();
2193 LLT Ty = MRI.getType(SrcReg);
2194 unsigned Size = Ty.getSizeInBits();
2195 unsigned HalfSize = Size / 2;
2196 assert(ShiftVal >= HalfSize);
2197
2198 LLT HalfTy = LLT::scalar(HalfSize);
2199
2201 auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
2202 unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2203
2204 if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2205 Register Narrowed = Unmerge.getReg(1);
2206
2207 // dst = G_LSHR s64:x, C for C >= 32
2208 // =>
2209 // lo, hi = G_UNMERGE_VALUES x
2210 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
2211
2212 if (NarrowShiftAmt != 0) {
2213 Narrowed = Builder.buildLShr(HalfTy, Narrowed,
2214 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2215 }
2216
2217 auto Zero = Builder.buildConstant(HalfTy, 0);
2218 Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
2219 } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
2220 Register Narrowed = Unmerge.getReg(0);
2221 // dst = G_SHL s64:x, C for C >= 32
2222 // =>
2223 // lo, hi = G_UNMERGE_VALUES x
2224 // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
2225 if (NarrowShiftAmt != 0) {
2226 Narrowed = Builder.buildShl(HalfTy, Narrowed,
2227 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2228 }
2229
2230 auto Zero = Builder.buildConstant(HalfTy, 0);
2231 Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});
2232 } else {
2233 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2234 auto Hi = Builder.buildAShr(
2235 HalfTy, Unmerge.getReg(1),
2236 Builder.buildConstant(HalfTy, HalfSize - 1));
2237
2238 if (ShiftVal == HalfSize) {
2239 // (G_ASHR i64:x, 32) ->
2240 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
2241 Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
2242 } else if (ShiftVal == Size - 1) {
2243 // Don't need a second shift.
2244 // (G_ASHR i64:x, 63) ->
2245 // %narrowed = (G_ASHR hi_32(x), 31)
2246 // G_MERGE_VALUES %narrowed, %narrowed
2247 Builder.buildMergeLikeInstr(DstReg, {Hi, Hi});
2248 } else {
2249 auto Lo = Builder.buildAShr(
2250 HalfTy, Unmerge.getReg(1),
2251 Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
2252
2253 // (G_ASHR i64:x, C) ->, for C >= 32
2254 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
2255 Builder.buildMergeLikeInstr(DstReg, {Lo, Hi});
2256 }
2257 }
2258
2259 MI.eraseFromParent();
2260}
2261
2263 unsigned TargetShiftAmount) {
2264 unsigned ShiftAmt;
2265 if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
2266 applyCombineShiftToUnmerge(MI, ShiftAmt);
2267 return true;
2268 }
2269
2270 return false;
2271}
2272
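// G_INTTOPTR (G_PTRTOINT %x) --> %x when %x already has the destination
// pointer type, so the round trip is a no-op.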
2274 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2275 Register DstReg = MI.getOperand(0).getReg();
2276 LLT DstTy = MRI.getType(DstReg);
2277 Register SrcReg = MI.getOperand(1).getReg();
2278 return mi_match(SrcReg, MRI,
2279 m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2280}
2281
2283 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2284 Register DstReg = MI.getOperand(0).getReg();
2286 Builder.buildCopy(DstReg, Reg);
2287 MI.eraseFromParent();
2288}
2289
2291 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2292 Register DstReg = MI.getOperand(0).getReg();
2294 Builder.buildZExtOrTrunc(DstReg, Reg);
2295 MI.eraseFromParent();
2296}
2297
2299 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2300 assert(MI.getOpcode() == TargetOpcode::G_ADD);
2301 Register LHS = MI.getOperand(1).getReg();
2302 Register RHS = MI.getOperand(2).getReg();
2303 LLT IntTy = MRI.getType(LHS);
2304
2305 // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2306 // instruction.
2307 PtrReg.second = false;
2308 for (Register SrcReg : {LHS, RHS}) {
2309 if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
2310 // Don't handle cases where the integer is implicitly converted to the
2311 // pointer width.
2312 LLT PtrTy = MRI.getType(PtrReg.first);
2313 if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2314 return true;
2315 }
2316
2317 PtrReg.second = true;
2318 }
2319
2320 return false;
2321}
2322
2324 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2325 Register Dst = MI.getOperand(0).getReg();
2326 Register LHS = MI.getOperand(1).getReg();
2327 Register RHS = MI.getOperand(2).getReg();
2328
2329 const bool DoCommute = PtrReg.second;
2330 if (DoCommute)
2331 std::swap(LHS, RHS);
2332 LHS = PtrReg.first;
2333
2334 LLT PtrTy = MRI.getType(LHS);
2335
2337 auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2338 Builder.buildPtrToInt(Dst, PtrAdd);
2339 MI.eraseFromParent();
2340}
2341
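// G_PTR_ADD (G_INTTOPTR C1), C2 --> C1 + C2 folded into a single constant at
// the width of the destination pointer.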
2343 APInt &NewCst) {
2344 auto &PtrAdd = cast<GPtrAdd>(MI);
2345 Register LHS = PtrAdd.getBaseReg();
2346 Register RHS = PtrAdd.getOffsetReg();
2348
2349 if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
2350 APInt Cst;
2351 if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2352 auto DstTy = MRI.getType(PtrAdd.getReg(0));
2353 // G_INTTOPTR uses zero-extension
2354 NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
2355 NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
2356 return true;
2357 }
2358 }
2359
2360 return false;
2361}
2362
2364 APInt &NewCst) {
2365 auto &PtrAdd = cast<GPtrAdd>(MI);
2366 Register Dst = PtrAdd.getReg(0);
2367
2369 Builder.buildConstant(Dst, NewCst);
2370 PtrAdd.eraseFromParent();
2371}
2372
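// G_ANYEXT (G_TRUNC %x) --> %x when %x already has the destination type.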
2374 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2375 Register DstReg = MI.getOperand(0).getReg();
2376 Register SrcReg = MI.getOperand(1).getReg();
2377 LLT DstTy = MRI.getType(DstReg);
2378 return mi_match(SrcReg, MRI,
2379 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2380}
2381
2383 assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
2384 Register DstReg = MI.getOperand(0).getReg();
2385 Register SrcReg = MI.getOperand(1).getReg();
2386 LLT DstTy = MRI.getType(DstReg);
2387 if (mi_match(SrcReg, MRI,
2388 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
2389 unsigned DstSize = DstTy.getScalarSizeInBits();
2390 unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
2391 return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
2392 }
2393 return false;
2394}
2395
2397 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2398 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2399 MI.getOpcode() == TargetOpcode::G_SEXT ||
2400 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2401 "Expected a G_[ASZ]EXT");
2402 Register SrcReg = MI.getOperand(1).getReg();
2403 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2404 // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2405 unsigned Opc = MI.getOpcode();
2406 unsigned SrcOpc = SrcMI->getOpcode();
2407 if (Opc == SrcOpc ||
2408 (Opc == TargetOpcode::G_ANYEXT &&
2409 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2410 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2411 MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2412 return true;
2413 }
2414 return false;
2415}
2416
2418 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2419 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2420 MI.getOpcode() == TargetOpcode::G_SEXT ||
2421 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2422 "Expected a G_[ASZ]EXT");
2423
2424 Register Reg = std::get<0>(MatchInfo);
2425 unsigned SrcExtOp = std::get<1>(MatchInfo);
2426
2427 // Combine exts with the same opcode.
2428 if (MI.getOpcode() == SrcExtOp) {
2430 MI.getOperand(1).setReg(Reg);
2432 return;
2433 }
2434
2435 // Combine:
2436 // - anyext([sz]ext x) to [sz]ext x
2437 // - sext(zext x) to zext x
2438 if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2439 (MI.getOpcode() == TargetOpcode::G_SEXT &&
2440 SrcExtOp == TargetOpcode::G_ZEXT)) {
2441 Register DstReg = MI.getOperand(0).getReg();
2443 Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2444 MI.eraseFromParent();
2445 }
2446}
2447
2449 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2450 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2451 Register SrcReg = MI.getOperand(1).getReg();
2452 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2453 unsigned SrcOpc = SrcMI->getOpcode();
2454 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2455 SrcOpc == TargetOpcode::G_ZEXT) {
2456 MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2457 return true;
2458 }
2459 return false;
2460}
2461
2463 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2464 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2465 Register SrcReg = MatchInfo.first;
2466 unsigned SrcExtOp = MatchInfo.second;
2467 Register DstReg = MI.getOperand(0).getReg();
2468 LLT SrcTy = MRI.getType(SrcReg);
2469 LLT DstTy = MRI.getType(DstReg);
2470 if (SrcTy == DstTy) {
2471 MI.eraseFromParent();
2472 replaceRegWith(MRI, DstReg, SrcReg);
2473 return;
2474 }
2476 if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2477 Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2478 else
2479 Builder.buildTrunc(DstReg, SrcReg);
2480 MI.eraseFromParent();
2481}
2482
2484 const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
2485 const unsigned TruncSize = TruncTy.getScalarSizeInBits();
2486
2487 // ShiftTy > 32 > TruncTy -> 32
2488 if (ShiftSize > 32 && TruncSize < 32)
2489 return ShiftTy.changeElementSize(32);
2490
2491 // TODO: We could also reduce to 16 bits, but that's more target-dependent.
2492 // Some targets like it, some don't, some only like it under certain
2493 // conditions/processor versions, etc.
2494 // A TL hook might be needed for this.
2495
2496 // Don't combine
2497 return ShiftTy;
2498}
2499
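// trunc (shift %x, %amt): try to perform the shift in a narrower type and
// truncate (or directly use) the result, as long as no significant bits are
// lost and the narrower shift is legal.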
2501 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
2502 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2503 Register DstReg = MI.getOperand(0).getReg();
2504 Register SrcReg = MI.getOperand(1).getReg();
2505
2506 if (!MRI.hasOneNonDBGUse(SrcReg))
2507 return false;
2508
2509 LLT SrcTy = MRI.getType(SrcReg);
2510 LLT DstTy = MRI.getType(DstReg);
2511
2512 MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI);
2513 const auto &TL = getTargetLowering();
2514
2515 LLT NewShiftTy;
2516 switch (SrcMI->getOpcode()) {
2517 default:
2518 return false;
2519 case TargetOpcode::G_SHL: {
2520 NewShiftTy = DstTy;
2521
2522 // Make sure new shift amount is legal.
2523 KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
2524 if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
2525 return false;
2526 break;
2527 }
2528 case TargetOpcode::G_LSHR:
2529 case TargetOpcode::G_ASHR: {
2530 // For right shifts, we conservatively do not do the transform if the TRUNC
2531 // has any STORE users. The reason is that if we change the type of the
2532 // shift, we may break the truncstore combine.
2533 //
2534 // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
2535 for (auto &User : MRI.use_instructions(DstReg))
2536 if (User.getOpcode() == TargetOpcode::G_STORE)
2537 return false;
2538
2539 NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy);
2540 if (NewShiftTy == SrcTy)
2541 return false;
2542
2543 // Make sure we won't lose information by truncating the high bits.
2544 KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
2545 if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
2546 DstTy.getScalarSizeInBits()))
2547 return false;
2548 break;
2549 }
2550 }
2551
2552 if (!isLegalOrBeforeLegalizer(
2553 {SrcMI->getOpcode(),
2554 {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
2555 return false;
2556
2557 MatchInfo = std::make_pair(SrcMI, NewShiftTy);
2558 return true;
2559}
2560
2562 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
2564
2565 MachineInstr *ShiftMI = MatchInfo.first;
2566 LLT NewShiftTy = MatchInfo.second;
2567
2568 Register Dst = MI.getOperand(0).getReg();
2569 LLT DstTy = MRI.getType(Dst);
2570
2571 Register ShiftAmt = ShiftMI->getOperand(2).getReg();
2572 Register ShiftSrc = ShiftMI->getOperand(1).getReg();
2573 ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);
2574
2575 Register NewShift =
2576 Builder
2577 .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})
2578 .getReg(0);
2579
2580 if (NewShiftTy == DstTy)
2581 replaceRegWith(MRI, Dst, NewShift);
2582 else
2583 Builder.buildTrunc(Dst, NewShift);
2584
2585 eraseInst(MI);
2586}
2587
2589 return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2590 return MO.isReg() &&
2591 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2592 });
2593}
2594
2596 return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2597 return !MO.isReg() ||
2598 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2599 });
2600}
2601
2603 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2604 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2605 return all_of(Mask, [](int Elt) { return Elt < 0; });
2606}
2607
2609 assert(MI.getOpcode() == TargetOpcode::G_STORE);
2610 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2611 MRI);
2612}
2613
2615 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2616 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2617 MRI);
2618}
2619
2621 assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
2622 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
2623 "Expected an insert/extract element op");
2624 LLT VecTy = MRI.getType(MI.getOperand(1).getReg());
2625 unsigned IdxIdx =
2626 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
2627 auto Idx = getIConstantVRegVal(MI.getOperand(IdxIdx).getReg(), MRI);
2628 if (!Idx)
2629 return false;
2630 return Idx->getZExtValue() >= VecTy.getNumElements();
2631}
2632
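// If the condition of a G_SELECT is a known constant, report which source
// operand the select always produces: operand 3 when the condition is zero,
// operand 2 otherwise.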
2634 GSelect &SelMI = cast<GSelect>(MI);
2635 auto Cst =
2636 isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
2637 if (!Cst)
2638 return false;
2639 OpIdx = Cst->isZero() ? 3 : 2;
2640 return true;
2641}
2642
2643void CombinerHelper::eraseInst(MachineInstr &MI) { MI.eraseFromParent(); }
2644
2646 const MachineOperand &MOP2) {
2647 if (!MOP1.isReg() || !MOP2.isReg())
2648 return false;
2649 auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
2650 if (!InstAndDef1)
2651 return false;
2652 auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
2653 if (!InstAndDef2)
2654 return false;
2655 MachineInstr *I1 = InstAndDef1->MI;
2656 MachineInstr *I2 = InstAndDef2->MI;
2657
2658 // Handle a case like this:
2659 //
2660 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2661 //
2662 // Even though %0 and %1 are produced by the same instruction they are not
2663 // the same values.
2664 if (I1 == I2)
2665 return MOP1.getReg() == MOP2.getReg();
2666
2667 // If we have an instruction which loads or stores, we can't guarantee that
2668 // it is identical.
2669 //
2670 // For example, we may have
2671 //
2672 // %x1 = G_LOAD %addr (load N from @somewhere)
2673 // ...
2674 // call @foo
2675 // ...
2676 // %x2 = G_LOAD %addr (load N from @somewhere)
2677 // ...
2678 // %or = G_OR %x1, %x2
2679 //
2680 // It's possible that @foo will modify whatever lives at the address we're
2681 // loading from. To be safe, let's just assume that all loads and stores
2682 // are different (unless we have something which is guaranteed to not
2683 // change.)
2684 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
2685 return false;
2686
2687 // If both instructions are loads or stores, they are equal only if both
2688 // are dereferenceable invariant loads with the same number of bits.
2689 if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2690 GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
2691 GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
2692 if (!LS1 || !LS2)
2693 return false;
2694
2695 if (!I2->isDereferenceableInvariantLoad() ||
2696 (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2697 return false;
2698 }
2699
2700 // Check for physical registers on the instructions first to avoid cases
2701 // like this:
2702 //
2703 // %a = COPY $physreg
2704 // ...
2705 // SOMETHING implicit-def $physreg
2706 // ...
2707 // %b = COPY $physreg
2708 //
2709 // These copies are not equivalent.
2710 if (any_of(I1->uses(), [](const MachineOperand &MO) {
2711 return MO.isReg() && MO.getReg().isPhysical();
2712 })) {
2713 // Check if we have a case like this:
2714 //
2715 // %a = COPY $physreg
2716 // %b = COPY %a
2717 //
2718 // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2719 // From that, we know that they must have the same value, since they must
2720 // have come from the same COPY.
2721 return I1->isIdenticalTo(*I2);
2722 }
2723
2724 // We don't have any physical registers, so we don't necessarily need the
2725 // same vreg defs.
2726 //
2727 // On the off-chance that there's some target instruction feeding into the
2728 // instruction, let's use produceSameValue instead of isIdenticalTo.
2729 if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
2730 // Handle instructions with multiple defs that produce the same values.
2731 // Values are the same for operands with the same index.
2732 // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2733 // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2734 // I1 and I2 are different instructions but produce the same values;
2735 // %1 and %6 are the same value, while %1 and %7 are not.
2736 return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
2737 I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
2738 }
2739 return false;
2740}
2741
2743 if (!MOP.isReg())
2744 return false;
2745 auto *MI = MRI.getVRegDef(MOP.getReg());
2746 auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
2747 return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
2748 MaybeCst->getSExtValue() == C;
2749}
2750
2752 if (!MOP.isReg())
2753 return false;
2754 std::optional<FPValueAndVReg> MaybeCst;
2755 if (!mi_match(MOP.getReg(), MRI, m_GFCstOrSplat(MaybeCst)))
2756 return false;
2757
2758 return MaybeCst->Value.isExactlyValue(C);
2759}
2760
2762 unsigned OpIdx) {
2763 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2764 Register OldReg = MI.getOperand(0).getReg();
2765 Register Replacement = MI.getOperand(OpIdx).getReg();
2766 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2767 MI.eraseFromParent();
2768 replaceRegWith(MRI, OldReg, Replacement);
2769}
2770
2772 Register Replacement) {
2773 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2774 Register OldReg = MI.getOperand(0).getReg();
2775 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2776 MI.eraseFromParent();
2777 replaceRegWith(MRI, OldReg, Replacement);
2778}
2779
2781 unsigned ConstIdx) {
2782 Register ConstReg = MI.getOperand(ConstIdx).getReg();
2783 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2784
2785 // Get the shift amount
2786 auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstReg, MRI);
2787 if (!VRegAndVal)
2788 return false;
2789
2790 // Return true if the shift amount >= bitwidth.
2791 return (VRegAndVal->Value.uge(DstTy.getSizeInBits()));
2792}
2793
2795 assert((MI.getOpcode() == TargetOpcode::G_FSHL ||
2796 MI.getOpcode() == TargetOpcode::G_FSHR) &&
2797 "This is not a funnel shift operation");
2798
2799 Register ConstReg = MI.getOperand(3).getReg();
2800 LLT ConstTy = MRI.getType(ConstReg);
2801 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2802
2803 auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstReg, MRI);
2804 assert((VRegAndVal) && "Value is not a constant");
2805
2806 // Calculate the new Shift Amount = Old Shift Amount % BitWidth
2807 APInt NewConst = VRegAndVal->Value.urem(
2808 APInt(ConstTy.getSizeInBits(), DstTy.getScalarSizeInBits()));
2809
2811 auto NewConstInstr = Builder.buildConstant(ConstTy, NewConst.getZExtValue());
2812 Builder.buildInstr(
2813 MI.getOpcode(), {MI.getOperand(0)},
2814 {MI.getOperand(1), MI.getOperand(2), NewConstInstr.getReg(0)});
2815
2816 MI.eraseFromParent();
2817}
2818
2820 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2821 // Match (cond ? x : x)
2822 return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2823 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2824 MRI);
2825}
2826
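// Match a binary operation whose two source operands are provably the same
// value, so the result can simply be replaced with that operand.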
2828 return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2829 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2830 MRI);
2831}
2832
2834 return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2835 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2836 MRI);
2837}
2838
2840 MachineOperand &MO = MI.getOperand(OpIdx);
2841 return MO.isReg() &&
2842 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2843}
2844
2846 unsigned OpIdx) {
2847 MachineOperand &MO = MI.getOperand(OpIdx);
2848 return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2849}
2850
2852 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2854 Builder.buildFConstant(MI.getOperand(0), C);
2855 MI.eraseFromParent();
2856}
2857
2859 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2861 Builder.buildConstant(MI.getOperand(0), C);
2862 MI.eraseFromParent();
2863}
2864
2866 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2868 Builder.buildConstant(MI.getOperand(0), C);
2869 MI.eraseFromParent();
2870}
2871
2873 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2875 Builder.buildFConstant(MI.getOperand(0), CFP->getValueAPF());
2876 MI.eraseFromParent();
2877}
2878
2880 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2882 Builder.buildUndef(MI.getOperand(0));
2883 MI.eraseFromParent();
2884}
2885
2887 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2888 Register LHS = MI.getOperand(1).getReg();
2889 Register RHS = MI.getOperand(2).getReg();
2890 Register &NewLHS = std::get<0>(MatchInfo);
2891 Register &NewRHS = std::get<1>(MatchInfo);
2892
2893 // Helper lambda to check for opportunities for
2894 // ((0-A) + B) -> B - A
2895 // (A + (0-B)) -> A - B
2896 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2897 if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2898 return false;
2899 NewLHS = MaybeNewLHS;
2900 return true;
2901 };
2902
2903 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2904}
2905
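// Walk a chain of G_INSERT_VECTOR_ELT instructions with constant indices
// back to a G_BUILD_VECTOR or G_IMPLICIT_DEF, recording the last value
// written to each lane so the chain can be rebuilt as one G_BUILD_VECTOR.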
2908 assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2909 "Invalid opcode");
2910 Register DstReg = MI.getOperand(0).getReg();
2911 LLT DstTy = MRI.getType(DstReg);
2912 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2913 unsigned NumElts = DstTy.getNumElements();
2914 // If this MI is part of a sequence of insert_vec_elts, then
2915 // don't do the combine in the middle of the sequence.
2916 if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2917 TargetOpcode::G_INSERT_VECTOR_ELT)
2918 return false;
2919 MachineInstr *CurrInst = &MI;
2920 MachineInstr *TmpInst;
2921 int64_t IntImm;
2922 Register TmpReg;
2923 MatchInfo.resize(NumElts);
2924 while (mi_match(
2925 CurrInst->getOperand(0).getReg(), MRI,
2926 m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
2927 if (IntImm >= NumElts || IntImm < 0)
2928 return false;
2929 if (!MatchInfo[IntImm])
2930 MatchInfo[IntImm] = TmpReg;
2931 CurrInst = TmpInst;
2932 }
2933 // Variable index.
2934 if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2935 return false;
2936 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2937 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2938 if (!MatchInfo[I - 1].isValid())
2939 MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2940 }
2941 return true;
2942 }
2943 // If we didn't end in a G_IMPLICIT_DEF, bail out.
2944 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2945}
2946
2950 Register UndefReg;
2951 auto GetUndef = [&]() {
2952 if (UndefReg)
2953 return UndefReg;
2954 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2955 UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2956 return UndefReg;
2957 };
2958 for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2959 if (!MatchInfo[I])
2960 MatchInfo[I] = GetUndef();
2961 }
2962 Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2963 MI.eraseFromParent();
2964}
2965
2967 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2969 Register SubLHS, SubRHS;
2970 std::tie(SubLHS, SubRHS) = MatchInfo;
2971 Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2972 MI.eraseFromParent();
2973}
2974
2977 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2978 //
2979 // Creates the new hand + logic instructions (but does not insert them).
2980 //
2981 // On success, MatchInfo is populated with the new instructions. These are
2982 // inserted in applyHoistLogicOpWithSameOpcodeHands.
2983 unsigned LogicOpcode = MI.getOpcode();
2984 assert(LogicOpcode == TargetOpcode::G_AND ||
2985 LogicOpcode == TargetOpcode::G_OR ||
2986 LogicOpcode == TargetOpcode::G_XOR);
2987 MachineIRBuilder MIB(MI);
2988 Register Dst = MI.getOperand(0).getReg();
2989 Register LHSReg = MI.getOperand(1).getReg();
2990 Register RHSReg = MI.getOperand(2).getReg();
2991
2992 // Don't recompute anything.
2993 if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2994 return false;
2995
2996 // Make sure we have (hand x, ...), (hand y, ...)
2997 MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2998 MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2999 if (!LeftHandInst || !RightHandInst)
3000 return false;
3001 unsigned HandOpcode = LeftHandInst->getOpcode();
3002 if (HandOpcode != RightHandInst->getOpcode())
3003 return false;
3004 if (!LeftHandInst->getOperand(1).isReg() ||
3005 !RightHandInst->getOperand(1).isReg())
3006 return false;
3007
3008 // Make sure the types match up, and if we're doing this post-legalization,
3009 // we end up with legal types.
3010 Register X = LeftHandInst->getOperand(1).getReg();
3011 Register Y = RightHandInst->getOperand(1).getReg();
3012 LLT XTy = MRI.getType(X);
3013 LLT YTy = MRI.getType(Y);
3014 if (!XTy.isValid() || XTy != YTy)
3015 return false;
3016
3017 // Optional extra source register.
3018 Register ExtraHandOpSrcReg;
3019 switch (HandOpcode) {
3020 default:
3021 return false;
3022 case TargetOpcode::G_ANYEXT:
3023 case TargetOpcode::G_SEXT:
3024 case TargetOpcode::G_ZEXT: {
3025 // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
3026 break;
3027 }
3028 case TargetOpcode::G_AND:
3029 case TargetOpcode::G_ASHR:
3030 case TargetOpcode::G_LSHR:
3031 case TargetOpcode::G_SHL: {
3032 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
3033 MachineOperand &ZOp = LeftHandInst->getOperand(2);
3034 if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
3035 return false;
3036 ExtraHandOpSrcReg = ZOp.getReg();
3037 break;
3038 }
3039 }
3040
3041 if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
3042 return false;
3043
3044 // Record the steps to build the new instructions.
3045 //
3046 // Steps to build (logic x, y)
3047 auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
3048 OperandBuildSteps LogicBuildSteps = {
3049 [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
3050 [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
3051 [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
3052 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
3053
3054 // Steps to build hand (logic x, y), ...z
3055 OperandBuildSteps HandBuildSteps = {
3056 [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
3057 [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
3058 if (ExtraHandOpSrcReg.isValid())
3059 HandBuildSteps.push_back(
3060 [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
3061 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
3062
3063 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
3064 return true;
3065}
3066
3069 assert(MatchInfo.InstrsToBuild.size() &&
3070 "Expected at least one instr to build?");
3072 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
3073 assert(InstrToBuild.Opcode && "Expected a valid opcode?");
3074 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
3075 MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
3076 for (auto &OperandFn : InstrToBuild.OperandFns)
3077 OperandFn(Instr);
3078 }
3079 MI.eraseFromParent();
3080}
3081
3083 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
3084 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
3085 int64_t ShlCst, AshrCst;
3086 Register Src;
3087 if (!mi_match(MI.getOperand(0).getReg(), MRI,
3088 m_GAShr(m_GShl(m_Reg(Src), m_ICstOrSplat(ShlCst)),
3089 m_ICstOrSplat(AshrCst))))
3090 return false;
3091 if (ShlCst != AshrCst)
3092 return false;
3093 if (!isLegalOrBeforeLegalizer(
3094 {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
3095 return false;
3096 MatchInfo = std::make_tuple(Src, ShlCst);
3097 return true;
3098}
3099
3101 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
3102 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
3103 Register Src;
3104 int64_t ShiftAmt;
3105 std::tie(Src, ShiftAmt) = MatchInfo;
3106 unsigned Size = MRI.getType(Src).getScalarSizeInBits();
3108 Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
3109 MI.eraseFromParent();
3110}
3111
3112/// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
3114 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3115 assert(MI.getOpcode() == TargetOpcode::G_AND);
3116
3117 Register Dst = MI.getOperand(0).getReg();
3118 LLT Ty = MRI.getType(Dst);
3119
3120 Register R;
3121 int64_t C1;
3122 int64_t C2;
3123 if (!mi_match(
3124 Dst, MRI,
3125 m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
3126 return false;
3127
3128 MatchInfo = [=](MachineIRBuilder &B) {
3129 if (C1 & C2) {
3130 B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
3131 return;
3132 }
3133 auto Zero = B.buildConstant(Ty, 0);
3134 replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
3135 };
3136 return true;
3137}
3138
3140 Register &Replacement) {
3141 // Given
3142 //
3143 // %y:_(sN) = G_SOMETHING
3144 // %x:_(sN) = G_SOMETHING
3145 // %res:_(sN) = G_AND %x, %y
3146 //
3147 // Eliminate the G_AND when it is known that x & y == x or x & y == y.
3148 //
3149 // Patterns like this can appear as a result of legalization. E.g.
3150 //
3151 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
3152 // %one:_(s32) = G_CONSTANT i32 1
3153 // %and:_(s32) = G_AND %cmp, %one
3154 //
3155 // In this case, G_ICMP only produces a single bit, so x & 1 == x.
3156 assert(MI.getOpcode() == TargetOpcode::G_AND);
3157 if (!KB)
3158 return false;
3159
3160 Register AndDst = MI.getOperand(0).getReg();
3161 Register LHS = MI.getOperand(1).getReg();
3162 Register RHS = MI.getOperand(2).getReg();
3163 KnownBits LHSBits = KB->getKnownBits(LHS);
3164 KnownBits RHSBits = KB->getKnownBits(RHS);
3165
3166 // Check that x & Mask == x.
3167 // x & 1 == x, always
3168 // x & 0 == x, only if x is also 0
3169 // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
3170 //
3171 // Check if we can replace AndDst with the LHS of the G_AND
3172 if (canReplaceReg(AndDst, LHS, MRI) &&
3173 (LHSBits.Zero | RHSBits.One).isAllOnes()) {
3174 Replacement = LHS;
3175 return true;
3176 }
3177
3178 // Check if we can replace AndDst with the RHS of the G_AND
3179 if (canReplaceReg(AndDst, RHS, MRI) &&
3180 (LHSBits.One | RHSBits.Zero).isAllOnes()) {
3181 Replacement = RHS;
3182 return true;
3183 }
3184
3185 return false;
3186}
3187
3189 // Given
3190 //
3191 // %y:_(sN) = G_SOMETHING
3192 // %x:_(sN) = G_SOMETHING
3193 // %res:_(sN) = G_OR %x, %y
3194 //
3195 // Eliminate the G_OR when it is known that x | y == x or x | y == y.
3196 assert(MI.getOpcode() == TargetOpcode::G_OR);
3197 if (!KB)
3198 return false;
3199
3200 Register OrDst = MI.getOperand(0).getReg();
3201 Register LHS = MI.getOperand(1).getReg();
3202 Register RHS = MI.getOperand(2).getReg();
3203 KnownBits LHSBits = KB->getKnownBits(LHS);
3204 KnownBits RHSBits = KB->getKnownBits(RHS);
3205
3206 // Check that x | Mask == x.
3207 // x | 0 == x, always
3208 // x | 1 == x, only if x is also 1
3209 // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
3210 //
3211 // Check if we can replace OrDst with the LHS of the G_OR
3212 if (canReplaceReg(OrDst, LHS, MRI) &&
3213 (LHSBits.One | RHSBits.Zero).isAllOnes()) {
3214 Replacement = LHS;
3215 return true;
3216 }
3217
3218 // Check if we can replace OrDst with the RHS of the G_OR
3219 if (canReplaceReg(OrDst, RHS, MRI) &&
3220 (LHSBits.Zero | RHSBits.One).isAllOnes()) {
3221 Replacement = RHS;
3222 return true;
3223 }
3224
3225 return false;
3226}
3227
3229 // If the input is already sign extended, just drop the extension.
3230 Register Src = MI.getOperand(1).getReg();
3231 unsigned ExtBits = MI.getOperand(2).getImm();
3232 unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
3233 return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
3234}
3235
3236static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3237 int64_t Cst, bool IsVector, bool IsFP) {
3238 // For i1, Cst will always be -1 regardless of boolean contents.
3239 return (ScalarSizeBits == 1 && Cst == -1) ||
3240 isConstTrueVal(TLI, Cst, IsVector, IsFP);
3241}
3242
3244 SmallVectorImpl<Register> &RegsToNegate) {
3245 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3246 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3247 const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3248 Register XorSrc;
3249 Register CstReg;
3250 // We match xor(src, true) here.
3251 if (!mi_match(MI.getOperand(0).getReg(), MRI,
3252 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
3253 return false;
3254
3255 if (!MRI.hasOneNonDBGUse(XorSrc))
3256 return false;
3257
3258 // Check that XorSrc is the root of a tree of comparisons combined with ANDs
3259 // and ORs. The suffix of RegsToNegate starting from index I is used as a work
3260 // list of tree nodes to visit.
3261 RegsToNegate.push_back(XorSrc);
3262 // Remember whether the comparisons are all integer or all floating point.
3263 bool IsInt = false;
3264 bool IsFP = false;
3265 for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3266 Register Reg = RegsToNegate[I];
3267 if (!MRI.hasOneNonDBGUse(Reg))
3268 return false;
3269 MachineInstr *Def = MRI.getVRegDef(Reg);
3270 switch (Def->getOpcode()) {
3271 default:
3272 // Don't match if the tree contains anything other than ANDs, ORs and
3273 // comparisons.
3274 return false;
3275 case TargetOpcode::G_ICMP:
3276 if (IsFP)
3277 return false;
3278 IsInt = true;
3279 // When we apply the combine we will invert the predicate.
3280 break;
3281 case TargetOpcode::G_FCMP:
3282 if (IsInt)
3283 return false;
3284 IsFP = true;
3285 // When we apply the combine we will invert the predicate.
3286 break;
3287 case TargetOpcode::G_AND:
3288 case TargetOpcode::G_OR:
3289 // Implement De Morgan's laws:
3290 // ~(x & y) -> ~x | ~y
3291 // ~(x | y) -> ~x & ~y
3292 // When we apply the combine we will change the opcode and recursively
3293 // negate the operands.
3294 RegsToNegate.push_back(Def->getOperand(1).getReg());
3295 RegsToNegate.push_back(Def->getOperand(2).getReg());
3296 break;
3297 }
3298 }
3299
3300 // Now we know whether the comparisons are integer or floating point, check
3301 // the constant in the xor.
3302 int64_t Cst;
3303 if (Ty.isVector()) {
3304 MachineInstr *CstDef = MRI.getVRegDef(CstReg);
3305 auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
3306 if (!MaybeCst)
3307 return false;
3308 if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
3309 return false;
3310 } else {
3311 if (!mi_match(CstReg, MRI, m_ICst(Cst)))
3312 return false;
3313 if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
3314 return false;
3315 }
3316
3317 return true;
3318}
3319
3321 SmallVectorImpl<Register> &RegsToNegate) {
3322 for (Register Reg : RegsToNegate) {
3323 MachineInstr *Def = MRI.getVRegDef(Reg);
3324 Observer.changingInstr(*Def);
3325 // For each comparison, invert the opcode. For each AND and OR, change the
3326 // opcode.
3327 switch (Def->getOpcode()) {
3328 default:
3329 llvm_unreachable("Unexpected opcode");
3330 case TargetOpcode::G_ICMP:
3331 case TargetOpcode::G_FCMP: {
3332 MachineOperand &PredOp = Def->getOperand(1);
3333 CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3334 (CmpInst::Predicate)PredOp.getPredicate());
3335 PredOp.setPredicate(NewP);
3336 break;
3337 }
3338 case TargetOpcode::G_AND:
3339 Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3340 break;
3341 case TargetOpcode::G_OR:
3342 Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3343 break;
3344 }
3345 Observer.changedInstr(*Def);
3346 }
3347
3348 replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3349 MI.eraseFromParent();
3350}
3351
3353 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3354 // Match (xor (and x, y), y) (or any of its commuted cases)
3355 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3356 Register &X = MatchInfo.first;
3357 Register &Y = MatchInfo.second;
3358 Register AndReg = MI.getOperand(1).getReg();
3359 Register SharedReg = MI.getOperand(2).getReg();
3360
3361 // Find a G_AND on either side of the G_XOR.
3362 // Look for one of
3363 //
3364 // (xor (and x, y), SharedReg)
3365 // (xor SharedReg, (and x, y))
3366 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3367 std::swap(AndReg, SharedReg);
3368 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3369 return false;
3370 }
3371
3372 // Only do this if we'll eliminate the G_AND.
3373 if (!MRI.hasOneNonDBGUse(AndReg))
3374 return false;
3375
3376 // We can combine if SharedReg is the same as either the LHS or RHS of the
3377 // G_AND.
3378 if (Y != SharedReg)
3379 std::swap(X, Y);
3380 return Y == SharedReg;
3381}
3382
3384 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3385 // Fold (xor (and x, y), y) -> (and (not x), y)
3387 Register X, Y;
3388 std::tie(X, Y) = MatchInfo;
3389 auto Not = Builder.buildNot(MRI.getType(X), X);
3391 MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3392 MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3393 MI.getOperand(2).setReg(Y);
3395}
3396
3398 auto &PtrAdd = cast<GPtrAdd>(MI);
3399 Register DstReg = PtrAdd.getReg(0);
3400 LLT Ty = MRI.getType(DstReg);
3401 const DataLayout &DL = Builder.getMF().getDataLayout();
3402
3403 if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3404 return false;
3405
3406 if (Ty.isPointer()) {
3407 auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
3408 return ConstVal && *ConstVal == 0;
3409 }
3410
3411 assert(Ty.isVector() && "Expecting a vector type");
3412 const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
3413 return isBuildVectorAllZeros(*VecMI, MRI);
3414}
3415
3417 auto &PtrAdd = cast<GPtrAdd>(MI);
3419 Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
3420 PtrAdd.eraseFromParent();
3421}
3422
3423/// The second source operand is known to be a power of 2.
3425 Register DstReg = MI.getOperand(0).getReg();
3426 Register Src0 = MI.getOperand(1).getReg();
3427 Register Pow2Src1 = MI.getOperand(2).getReg();
3428 LLT Ty = MRI.getType(DstReg);
3430
3431 // Fold (urem x, pow2) -> (and x, pow2-1)
3432 auto NegOne = Builder.buildConstant(Ty, -1);
3433 auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3434 Builder.buildAnd(DstReg, Src0, Add);
3435 MI.eraseFromParent();
3436}
3437
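// Match a binary operator where one operand is a G_SELECT of constants (and
// the other is constant or cheap), so the op can later be folded into both
// arms of the select and the binary operator eliminated.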
3439 unsigned &SelectOpNo) {
3440 Register LHS = MI.getOperand(1).getReg();
3441 Register RHS = MI.getOperand(2).getReg();
3442
3443 Register OtherOperandReg = RHS;
3444 SelectOpNo = 1;
3445 MachineInstr *Select = MRI.getVRegDef(LHS);
3446
3447 // Don't do this unless the old select is going away. We want to eliminate the
3448 // binary operator, not replace a binop with a select.
3449 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3450 !MRI.hasOneNonDBGUse(LHS)) {
3451 OtherOperandReg = LHS;
3452 SelectOpNo = 2;
3453 Select = MRI.getVRegDef(RHS);
3454 if (Select->getOpcode() != TargetOpcode::G_SELECT ||
3455 !MRI.hasOneNonDBGUse(RHS))
3456 return false;
3457 }
3458
3459 MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
3460 MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());
3461
3462 if (!isConstantOrConstantVector(*SelectLHS, MRI,
3463 /*AllowFP*/ true,
3464 /*AllowOpaqueConstants*/ false))
3465 return false;
3466 if (!isConstantOrConstantVector(*SelectRHS, MRI,
3467 /*AllowFP*/ true,
3468 /*AllowOpaqueConstants*/ false))
3469 return false;
3470
3471 unsigned BinOpcode = MI.getOpcode();
3472
3473 // We know that one of the operands is a select of constants. Now verify that
3474 // the other binary operator operand is either a constant, or we can handle a
3475 // variable.
3476 bool CanFoldNonConst =
3477 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
3478 (isNullOrNullSplat(*SelectLHS, MRI) ||
3479 isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
3480 (isNullOrNullSplat(*SelectRHS, MRI) ||
3481 isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
3482 if (CanFoldNonConst)
3483 return true;
3484
3485 return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
3486 /*AllowFP*/ true,
3487 /*AllowOpaqueConstants*/ false);
3488}
3489
3490/// \p SelectOperand is the operand in binary operator \p MI that is the select
3491/// to fold.
3492void CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
3493 const unsigned &SelectOperand) {
3494 Builder.setInstrAndDebugLoc(MI);
3495
3496 Register Dst = MI.getOperand(0).getReg();
3497 Register LHS = MI.getOperand(1).getReg();
3498 Register RHS = MI.getOperand(2).getReg();
3499 MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());
3500
3501 Register SelectCond = Select->getOperand(1).getReg();
3502 Register SelectTrue = Select->getOperand(2).getReg();
3503 Register SelectFalse = Select->getOperand(3).getReg();
3504
3505 LLT Ty = MRI.getType(Dst);
3506 unsigned BinOpcode = MI.getOpcode();
3507
3508 Register FoldTrue, FoldFalse;
3509
3510 // We have a select-of-constants followed by a binary operator with a
3511 // constant. Eliminate the binop by pulling the constant math into the select.
3512 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
3513 if (SelectOperand == 1) {
3514 // TODO: SelectionDAG verifies this actually constant folds before
3515 // committing to the combine.
3516
3517 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
3518 FoldFalse =
3519 Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
3520 } else {
3521 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
3522 FoldFalse =
3523 Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
3524 }
3525
3526 Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
3527 MI.eraseFromParent();
3528}
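// Worked example (illustrative, names made up): with
//   %sel:_(s32) = G_SELECT %cond, %four, %eight
//   %dst:_(s32) = G_ADD %sel, %two
// the constant math is pulled into the select:
//   %t:_(s32)   = G_ADD %four, %two     ; constant-folds to 6
//   %f:_(s32)   = G_ADD %eight, %two    ; constant-folds to 10
//   %dst:_(s32) = G_SELECT %cond, %t, %f
// so the original binary operator disappears once the constants fold.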
3529
3530std::optional<SmallVector<Register, 8>>
3531CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3532 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3533 // We want to detect if Root is part of a tree which represents a bunch
3534 // of loads being merged into a larger load. We'll try to recognize patterns
3535 // like, for example:
3536 //
3537 // Reg Reg
3538 // \ /
3539 // OR_1 Reg
3540 // \ /
3541 // OR_2
3542 // \ Reg
3543 // .. /
3544 // Root
3545 //
3546 // Reg Reg Reg Reg
3547 // \ / \ /
3548 // OR_1 OR_2
3549 // \ /
3550 // \ /
3551 // ...
3552 // Root
3553 //
3554 // Each "Reg" may have been produced by a load + some arithmetic. This
3555 // function will save each of them.
3556 SmallVector<Register, 8> RegsToVisit;
3557 SmallVector<const MachineInstr *, 8> Ors = {Root};
3558
3559 // In the "worst" case, we're dealing with a load for each byte. So, there
3560 // are at most #bytes - 1 ORs.
3561 const unsigned MaxIter =
3562 MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3563 for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3564 if (Ors.empty())
3565 break;
3566 const MachineInstr *Curr = Ors.pop_back_val();
3567 Register OrLHS = Curr->getOperand(1).getReg();
3568 Register OrRHS = Curr->getOperand(2).getReg();
3569
3570 // In the combine, we want to eliminate the entire tree.
3571 if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3572 return std::nullopt;
3573
3574 // If it's a G_OR, save it and continue to walk. If it's not, then it's
3575 // something that may be a load + arithmetic.
3576 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3577 Ors.push_back(Or);
3578 else
3579 RegsToVisit.push_back(OrLHS);
3580 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3581 Ors.push_back(Or);
3582 else
3583 RegsToVisit.push_back(OrRHS);
3584 }
3585
3586 // We're going to try and merge each register into a wider power-of-2 type,
3587 // so we ought to have an even number of registers.
3588 if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3589 return std::nullopt;
3590 return RegsToVisit;
3591}
3592
3593/// Helper function for findLoadOffsetsForLoadOrCombine.
3594///
3595/// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3596/// and then moving that value into a specific byte offset.
3597///
3598/// e.g. x[i] << 24
3599///
3600/// \returns The load instruction and the byte offset it is moved into.
3601static std::optional<std::pair<GZExtLoad *, int64_t>>
3602matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3603 const MachineRegisterInfo &MRI) {
3604 assert(MRI.hasOneNonDBGUse(Reg) &&
3605 "Expected Reg to only have one non-debug use?");
3606 Register MaybeLoad;
3607 int64_t Shift;
3608 if (!mi_match(Reg, MRI,
3609 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3610 Shift = 0;
3611 MaybeLoad = Reg;
3612 }
3613
3614 if (Shift % MemSizeInBits != 0)
3615 return std::nullopt;
3616
3617 // TODO: Handle other types of loads.
3618 auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3619 if (!Load)
3620 return std::nullopt;
3621
3622 if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3623 return std::nullopt;
3624
3625 return std::make_pair(Load, Shift / MemSizeInBits);
3626}
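// Illustrative use (names made up): with MemSizeInBits == 8 and
//   %b:_(s32) = G_ZEXTLOAD %ptr :: (load (s8))
//   %r:_(s32) = G_SHL %b, 16
// the match sets Shift = 16, so the helper returns {load, 16 / 8} == {load, 2},
// i.e. the loaded byte lands in byte position 2 of the wider value.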
3627
3628std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3629CombinerHelper::findLoadOffsetsForLoadOrCombine(
3630 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3631 const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3632
3633 // Each load found for the pattern. There should be one for each RegsToVisit.
3634 SmallSetVector<const MachineInstr *, 8> Loads;
3635
3636 // The lowest index used in any load. (The lowest "i" for each x[i].)
3637 int64_t LowestIdx = INT64_MAX;
3638
3639 // The load which uses the lowest index.
3640 GZExtLoad *LowestIdxLoad = nullptr;
3641
3642 // Keeps track of the load indices we see. We shouldn't see any indices twice.
3643 SmallSet<int64_t, 8> SeenIdx;
3644
3645 // Ensure each load is in the same MBB.
3646 // TODO: Support multiple MachineBasicBlocks.
3647 MachineBasicBlock *MBB = nullptr;
3648 const MachineMemOperand *MMO = nullptr;
3649
3650 // Earliest instruction-order load in the pattern.
3651 GZExtLoad *EarliestLoad = nullptr;
3652
3653 // Latest instruction-order load in the pattern.
3654 GZExtLoad *LatestLoad = nullptr;
3655
3656 // Base pointer which every load should share.
3657 Register BasePtr;
3658
3659 // We want to find a load for each register. Each load should have some
3660 // appropriate bit twiddling arithmetic. During this loop, we will also keep
3661 // track of the load which uses the lowest index. Later, we will check if we
3662 // can use its pointer in the final, combined load.
3663 for (auto Reg : RegsToVisit) {
3664 // Find the load, and the byte position its value will end up at in the
3665 // (possibly shifted) result.
3666 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3667 if (!LoadAndPos)
3668 return std::nullopt;
3669 GZExtLoad *Load;
3670 int64_t DstPos;
3671 std::tie(Load, DstPos) = *LoadAndPos;
3672
3673 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3674 // it is difficult to check for stores/calls/etc between loads.
3675 MachineBasicBlock *LoadMBB = Load->getParent();
3676 if (!MBB)
3677 MBB = LoadMBB;
3678 if (LoadMBB != MBB)
3679 return std::nullopt;
3680
3681 // Make sure that the MachineMemOperands of every seen load are compatible.
3682 auto &LoadMMO = Load->getMMO();
3683 if (!MMO)
3684 MMO = &LoadMMO;
3685 if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
3686 return std::nullopt;
3687
3688 // Find out what the base pointer and index for the load is.
3689 Register LoadPtr;
3690 int64_t Idx;
3691 if (!mi_match(Load->getOperand(1).getReg(), MRI,
3692 m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3693 LoadPtr = Load->getOperand(1).getReg();
3694 Idx = 0;
3695 }
3696
3697 // Don't combine things like a[i], a[i] -> a bigger load.
3698 if (!SeenIdx.insert(Idx).second)
3699 return std::nullopt;
3700
3701 // Every load must share the same base pointer; don't combine things like:
3702 //
3703 // a[i], b[i + 1] -> a bigger load.
3704 if (!BasePtr.isValid())
3705 BasePtr = LoadPtr;
3706 if (BasePtr != LoadPtr)
3707 return std::nullopt;
3708
3709 if (Idx < LowestIdx) {
3710 LowestIdx = Idx;
3711 LowestIdxLoad = Load;
3712 }
3713
3714 // Keep track of the byte offset that this load ends up at. If we have seen
3715 // the byte offset, then stop here. We do not want to combine:
3716 //
3717 // a[i] << 16, a[i + k] << 16 -> a bigger load.
3718 if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3719 return std::nullopt;
3720 Loads.insert(Load);
3721
3722 // Keep track of the position of the earliest/latest loads in the pattern.
3723 // We will check that there are no load fold barriers between them later
3724 // on.
3725 //
3726 // FIXME: Is there a better way to check for load fold barriers?
3727 if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3728 EarliestLoad = Load;
3729 if (!LatestLoad || dominates(*LatestLoad, *Load))
3730 LatestLoad = Load;
3731 }
3732
3733 // We found a load for each register. Let's check if each load satisfies the
3734 // pattern.
3735 assert(Loads.size() == RegsToVisit.size() &&
3736 "Expected to find a load for each register?");
3737 assert(EarliestLoad != LatestLoad && EarliestLoad &&
3738 LatestLoad && "Expected at least two loads?");
3739
3740 // Check if there are any stores, calls, etc. between any of the loads. If
3741 // there are, then we can't safely perform the combine.
3742 //
3743 // MaxIter is chosen based on the (worst case) number of iterations it
3744 // typically takes to succeed in the LLVM test suite plus some padding.
3745 //
3746 // FIXME: Is there a better way to check for load fold barriers?
3747 const unsigned MaxIter = 20;
3748 unsigned Iter = 0;
3749 for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3750 LatestLoad->getIterator())) {
3751 if (Loads.count(&MI))
3752 continue;
3753 if (MI.isLoadFoldBarrier())
3754 return std::nullopt;
3755 if (Iter++ == MaxIter)
3756 return std::nullopt;
3757 }
3758
3759 return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
3760}
3761
3762bool CombinerHelper::matchLoadOrCombine(
3763 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3764 assert(MI.getOpcode() == TargetOpcode::G_OR);
3765 MachineFunction &MF = *MI.getMF();
3766 // Assuming a little-endian target, transform:
3767 // s8 *a = ...
3768 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3769 // =>
3770 // s32 val = *((i32)a)
3771 //
3772 // s8 *a = ...
3773 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3774 // =>
3775 // s32 val = BSWAP(*((s32)a))
3776 Register Dst = MI.getOperand(0).getReg();
3777 LLT Ty = MRI.getType(Dst);
3778 if (Ty.isVector())
3779 return false;
3780
3781 // We need to combine at least two loads into this type. Since the smallest
3782 // possible load is into a byte, we need at least a 16-bit wide type.
3783 const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3784 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3785 return false;
3786
3787 // Match a collection of non-OR instructions in the pattern.
3788 auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3789 if (!RegsToVisit)
3790 return false;
3791
3792 // We have a collection of non-OR instructions. Figure out how wide each of
3793 // the small loads should be based on the number of potential loads we
3794 // found.
3795 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3796 if (NarrowMemSizeInBits % 8 != 0)
3797 return false;
3798
3799 // Check if each register feeding into each OR is a load from the same
3800 // base pointer + some arithmetic.
3801 //
3802 // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3803 //
3804 // Also verify that each of these ends up putting a[i] into the same memory
3805 // offset as a load into a wide type would.
3806 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3807 GZExtLoad *LowestIdxLoad, *LatestLoad;
3808 int64_t LowestIdx;
3809 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3810 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3811 if (!MaybeLoadInfo)
3812 return false;
3813 std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;
3814
3815 // We have a bunch of loads being OR'd together. Using the addresses + offsets
3816 // we found before, check if this corresponds to a big or little endian byte
3817 // pattern. If it does, then we can represent it using a load + possibly a
3818 // BSWAP.
3819 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3820 std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3821 if (!IsBigEndian)
3822 return false;
3823 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3824 if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3825 return false;
3826
3827 // Make sure that the load from the lowest index produces offset 0 in the
3828 // final value.
3829 //
3830 // This ensures that we won't combine something like this:
3831 //
3832 // load x[i] -> byte 2
3833 // load x[i+1] -> byte 0 ---> wide_load x[i]
3834 // load x[i+2] -> byte 1
3835 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3836 const unsigned ZeroByteOffset =
3837 *IsBigEndian
3838 ? bigEndianByteAt(NumLoadsInTy, 0)
3839 : littleEndianByteAt(NumLoadsInTy, 0);
3840 auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3841 if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3842 ZeroOffsetIdx->second != LowestIdx)
3843 return false;
3844
3845 // We will reuse the pointer from the load which ends up at byte offset 0. It
3846 // may not use index 0.
3847 Register Ptr = LowestIdxLoad->getPointerReg();
3848 const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3849 LegalityQuery::MemDesc MMDesc(MMO);
3850 MMDesc.MemoryTy = Ty;
3851 if (!isLegalOrBeforeLegalizer(
3852 {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3853 return false;
3854 auto PtrInfo = MMO.getPointerInfo();
3855 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3856
3857 // Load must be allowed and fast on the target.
3858 LLVMContext &C = MF.getFunction().getContext();
3859 auto &DL = MF.getDataLayout();
3860 unsigned Fast = 0;
3861 if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3862 !Fast)
3863 return false;
3864
3865 MatchInfo = [=](MachineIRBuilder &MIB) {
3866 MIB.setInstrAndDebugLoc(*LatestLoad);
3867 Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3868 MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3869 if (NeedsBSwap)
3870 MIB.buildBSwap(Dst, LoadDst);
3871 };
3872 return true;
3873}
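// Worked example (illustrative): for four s8 loads ORed into an s32 on a
// little-endian target,
//   val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
// MemOffset2Idx maps byte offsets {0,1,2,3} to load indices {0,1,2,3},
// isBigEndian reports a little-endian pattern, NeedsBSwap is false, and the
// whole tree is replaced by one s32 G_LOAD through a[0]'s pointer. The
// mirrored pattern (a[0] << 24 | ... | a[3]) would instead need a G_BSWAP of
// that wide load.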
3874
3875bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3876 MachineInstr *&ExtMI) {
3877 assert(MI.getOpcode() == TargetOpcode::G_PHI);
3878
3879 Register DstReg = MI.getOperand(0).getReg();
3880
3881 // TODO: Extending a vector may be expensive; don't do this until heuristics
3882 // are better.
3883 if (MRI.getType(DstReg).isVector())
3884 return false;
3885
3886 // Try to match a phi, whose only use is an extend.
3887 if (!MRI.hasOneNonDBGUse(DstReg))
3888 return false;
3889 ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3890 switch (ExtMI->getOpcode()) {
3891 case TargetOpcode::G_ANYEXT:
3892 return true; // G_ANYEXT is usually free.
3893 case TargetOpcode::G_ZEXT:
3894 case TargetOpcode::G_SEXT:
3895 break;
3896 default:
3897 return false;
3898 }
3899
3900 // If the target is likely to fold this extend away, don't propagate.
3901 if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3902 return false;
3903
3904 // We don't want to propagate the extends unless there's a good chance that
3905 // they'll be optimized in some way.
3906 // Collect the unique incoming values.
3907 SmallPtrSet<MachineInstr *, 4> InSrcs;
3908 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3909 auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3910 switch (DefMI->getOpcode()) {
3911 case TargetOpcode::G_LOAD:
3912 case TargetOpcode::G_TRUNC:
3913 case TargetOpcode::G_SEXT:
3914 case TargetOpcode::G_ZEXT:
3915 case TargetOpcode::G_ANYEXT:
3916 case TargetOpcode::G_CONSTANT:
3917 InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3918 // Don't try to propagate if there are too many places to create new
3919 // extends, chances are it'll increase code size.
3920 if (InSrcs.size() > 2)
3921 return false;
3922 break;
3923 default:
3924 return false;
3925 }
3926 }
3927 return true;
3928}
3929
3930void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3931 MachineInstr *&ExtMI) {
3932 assert(MI.getOpcode() == TargetOpcode::G_PHI);
3933 Register DstReg = ExtMI->getOperand(0).getReg();
3934 LLT ExtTy = MRI.getType(DstReg);
3935
3936 // Propagate the extension into each incoming reg's defining block.
3937 // Use a SetVector here because PHIs can have duplicate edges, and we want
3938 // deterministic iteration order.
3939 SmallSetVector<MachineInstr *, 8> SrcMIs;
3940 SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3941 for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3942 auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3943 if (!SrcMIs.insert(SrcMI))
3944 continue;
3945
3946 // Build an extend after each src inst.
3947 auto *MBB = SrcMI->getParent();
3948 MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3949 if (InsertPt != MBB->end() && InsertPt->isPHI())
3950 InsertPt = MBB->getFirstNonPHI();
3951
3952 Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3953 Builder.setDebugLoc(MI.getDebugLoc());
3954 auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3955 SrcMI->getOperand(0).getReg());
3956 OldToNewSrcMap[SrcMI] = NewExt;
3957 }
3958
3959 // Create a new phi with the extended inputs.
3960 Builder.setInstrAndDebugLoc(MI);
3961 auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3962 NewPhi.addDef(DstReg);
3963 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
3964 if (!MO.isReg()) {
3965 NewPhi.addMBB(MO.getMBB());
3966 continue;
3967 }
3968 auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3969 NewPhi.addUse(NewSrc->getOperand(0).getReg());
3970 }
3971 Builder.insertInstr(NewPhi);
3972 ExtMI->eraseFromParent();
3973}
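// Illustrative before/after (names made up): for
//   %phi:_(s8)  = G_PHI %a(s8), %bb1, %b(s8), %bb2
//   %ext:_(s32) = G_SEXT %phi
// a G_SEXT to s32 is inserted right after the definitions of %a and %b, and
// %ext is replaced by a phi of the extended values:
//   %ext:_(s32) = G_PHI %a_ext(s32), %bb1, %b_ext(s32), %bb2
// The original narrow phi is left behind to be removed as dead code.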
3974
3975bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3976 Register &Reg) {
3977 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3978 // If we have a constant index, look for a G_BUILD_VECTOR source
3979 // and find the source register that the index maps to.
3980 Register SrcVec = MI.getOperand(1).getReg();
3981 LLT SrcTy = MRI.getType(SrcVec);
3982
3983 auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3984 if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3985 return false;
3986
3987 unsigned VecIdx = Cst->Value.getZExtValue();
3988
3989 // Check if we have a build_vector or build_vector_trunc with an optional
3990 // trunc in front.
3991 MachineInstr *SrcVecMI = MRI.getVRegDef(SrcVec);
3992 if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
3993 SrcVecMI = MRI.getVRegDef(SrcVecMI->getOperand(1).getReg());
3994 }
3995
3996 if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
3997 SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
3998 return false;
3999
4000 EVT Ty(getMVTForLLT(SrcTy));
4001 if (!MRI.hasOneNonDBGUse(SrcVec) &&
4002 !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
4003 return false;
4004
4005 Reg = SrcVecMI->getOperand(VecIdx + 1).getReg();
4006 return true;
4007}
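// Illustrative example (names made up): with
//   %vec:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
//   %elt:_(s32) = G_EXTRACT_VECTOR_ELT %vec, 1
// the constant index selects source operand 1 of the build_vector, so Reg is
// set to %b and the extract is later replaced by %b (or a trunc of it).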
4008
4009void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
4010 Register &Reg) {
4011 // Check the type of the register, since it may have come from a
4012 // G_BUILD_VECTOR_TRUNC.
4013 LLT ScalarTy = MRI.getType(Reg);
4014 Register DstReg = MI.getOperand(0).getReg();
4015 LLT DstTy = MRI.getType(DstReg);
4016
4017 Builder.setInstrAndDebugLoc(MI);
4018 if (ScalarTy != DstTy) {
4019 assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
4020 Builder.buildTrunc(DstReg, Reg);
4021 MI.eraseFromParent();
4022 return;
4023 }
4024 replaceSingleDefInstWithReg(MI, Reg);
4025}
4026
4027bool CombinerHelper::matchExtractAllEltsFromBuildVector(
4028 MachineInstr &MI,
4029 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
4030 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4031 // This combine tries to find build_vector's which have every source element
4032 // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
4033 // masked load scalarization are run late in the pipeline. There's already
4034 // a combine for a similar pattern starting from the extract, but that
4035 // doesn't attempt to do it if there are multiple uses of the build_vector,
4036 // which in this case is true. Starting the combine from the build_vector
4037 // feels more natural than trying to find sibling nodes of extracts.
4038 // E.g.
4039 // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
4040 // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
4041 // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
4042 // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
4043 // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
4044 // ==>
4045 // replace ext{1,2,3,4} with %s{1,2,3,4}
4046
4047 Register DstReg = MI.getOperand(0).getReg();
4048 LLT DstTy = MRI.getType(DstReg);
4049 unsigned NumElts = DstTy.getNumElements();
4050
4051 SmallBitVector ExtractedElts(NumElts);
4052 for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
4053 if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
4054 return false;
4055 auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
4056 if (!Cst)
4057 return false;
4058 unsigned Idx = Cst->getZExtValue();
4059 if (Idx >= NumElts)
4060 return false; // Out of range.
4061 ExtractedElts.set(Idx);
4062 SrcDstPairs.emplace_back(
4063 std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
4064 }
4065 // Match if every element was extracted.
4066 return ExtractedElts.all();
4067}
4068
4069void CombinerHelper::applyExtractAllEltsFromBuildVector(
4070 MachineInstr &MI,
4071 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
4072 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4073 for (auto &Pair : SrcDstPairs) {
4074 auto *ExtMI = Pair.second;
4075 replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
4076 ExtMI->eraseFromParent();
4077 }
4078 MI.eraseFromParent();
4079}
4080
4081void CombinerHelper::applyBuildFn(
4082 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4083 Builder.setInstrAndDebugLoc(MI);
4084 MatchInfo(Builder);
4085 MI.eraseFromParent();
4086}
4087
4088void CombinerHelper::applyBuildFnNoErase(
4089 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4090 Builder.setInstrAndDebugLoc(MI);
4091 MatchInfo(Builder);
4092}
4093
4094bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
4095 BuildFnTy &MatchInfo) {
4096 assert(MI.getOpcode() == TargetOpcode::G_OR);
4097
4098 Register Dst = MI.getOperand(0).getReg();
4099 LLT Ty = MRI.getType(Dst);
4100 unsigned BitWidth = Ty.getScalarSizeInBits();
4101
4102 Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
4103 unsigned FshOpc = 0;
4104
4105 // Match (or (shl ...), (lshr ...)).
4106 if (!mi_match(Dst, MRI,
4107 // m_GOr() handles the commuted version as well.
4108 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
4109 m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
4110 return false;
4111
4112 // Given constants C0 and C1 such that C0 + C1 is bit-width:
4113 // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
4114 int64_t CstShlAmt, CstLShrAmt;
4115 if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
4116 mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
4117 CstShlAmt + CstLShrAmt == BitWidth) {
4118 FshOpc = TargetOpcode::G_FSHR;
4119 Amt = LShrAmt;
4120
4121 } else if (mi_match(LShrAmt, MRI,
4122 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
4123 ShlAmt == Amt) {
4124 // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
4125 FshOpc = TargetOpcode::G_FSHL;
4126
4127 } else if (mi_match(ShlAmt, MRI,
4128 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
4129 LShrAmt == Amt) {
4130 // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
4131 FshOpc = TargetOpcode::G_FSHR;
4132
4133 } else {
4134 return false;
4135 }
4136
4137 LLT AmtTy = MRI.getType(Amt);
4138 if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
4139 return false;
4140
4141 MatchInfo = [=](MachineIRBuilder &B) {
4142 B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
4143 };
4144 return true;
4145}
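// Worked example (illustrative, names made up): on s32,
//   %hi:_(s32) = G_SHL %x, 8
//   %lo:_(s32) = G_LSHR %y, 24
//   %or:_(s32) = G_OR %hi, %lo
// has C0 + C1 == 8 + 24 == 32 == bit-width, so MatchInfo builds
//   %or:_(s32) = G_FSHR %x, %y, 24
// (equivalent to G_FSHL %x, %y, 8), provided G_FSHR is legal for {s32, s32}
// or we are still before the legalizer.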
4146
4147/// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
4148bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
4149 unsigned Opc = MI.getOpcode();
4150 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4151 Register X = MI.getOperand(1).getReg();
4152 Register Y = MI.getOperand(2).getReg();
4153 if (X != Y)
4154 return false;
4155 unsigned RotateOpc =
4156 Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
4157 return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
4158}
4159
4160void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
4161 unsigned Opc = MI.getOpcode();
4162 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
4163 bool IsFSHL = Opc == TargetOpcode::G_FSHL;
4164 Observer.changingInstr(MI);
4165 MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
4166 : TargetOpcode::G_ROTR));
4167 MI.removeOperand(2);
4168 Observer.changedInstr(MI);
4169}
4170
4171// Fold (rot x, c) -> (rot x, c % BitSize)
4172bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
4173 assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4174 MI.getOpcode() == TargetOpcode::G_ROTR);
4175 unsigned Bitsize =
4176 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4177 Register AmtReg = MI.getOperand(2).getReg();
4178 bool OutOfRange = false;
4179 auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
4180 if (auto *CI = dyn_cast<ConstantInt>(C))
4181 OutOfRange |= CI->getValue().uge(Bitsize);
4182 return true;
4183 };
4184 return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
4185}
4186
4187void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
4188 assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
4189 MI.getOpcode() == TargetOpcode::G_ROTR);
4190 unsigned Bitsize =
4191 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
4192 Builder.setInstrAndDebugLoc(MI);
4193 Register Amt = MI.getOperand(2).getReg();
4194 LLT AmtTy = MRI.getType(Amt);
4195 auto Bits = Builder.buildConstant(AmtTy, Bitsize);
4196 Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
4197 Observer.changingInstr(MI);
4198 MI.getOperand(2).setReg(Amt);
4199 Observer.changedInstr(MI);
4200}
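// Worked example (illustrative): an out-of-range rotate amount is reduced
// modulo the bit width, e.g. on s32
//   %r:_(s32) = G_ROTL %x, 35
// becomes
//   %amt:_(s32) = G_UREM 35, 32        ; constant-folds to 3
//   %r:_(s32)   = G_ROTL %x, %amt
// since rotating a 32-bit value by 35 and by 3 give the same result.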
4201
4202bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
4203 int64_t &MatchInfo) {
4204 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4205 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4206 auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
4207 auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
4208 std::optional<bool> KnownVal;
4209 switch (Pred) {
4210 default:
4211 llvm_unreachable("Unexpected G_ICMP predicate?");
4212 case CmpInst::ICMP_EQ:
4213 KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
4214 break;
4215 case CmpInst::ICMP_NE:
4216 KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
4217 break;
4218 case CmpInst::ICMP_SGE:
4219 KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
4220 break;
4221 case CmpInst::ICMP_SGT:
4222 KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
4223 break;
4224 case CmpInst::ICMP_SLE:
4225 KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
4226 break;
4227 case CmpInst::ICMP_SLT:
4228 KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
4229 break;
4230 case CmpInst::ICMP_UGE:
4231 KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
4232 break;
4233 case CmpInst::ICMP_UGT:
4234 KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
4235 break;
4236 case CmpInst::ICMP_ULE:
4237 KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
4238 break;
4239 case CmpInst::ICMP_ULT:
4240 KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
4241 break;
4242 }
4243 if (!KnownVal)
4244 return false;
4245 MatchInfo =
4246 *KnownVal
4247 ? getICmpTrueVal(getTargetLowering(),
4248 /*IsVector = */
4249 MRI.getType(MI.getOperand(0).getReg()).isVector(),
4250 /* IsFP = */ false)
4251 : 0;
4252 return true;
4253}
4254
4255bool CombinerHelper::matchICmpToLHSKnownBits(
4256 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4257 assert(MI.getOpcode() == TargetOpcode::G_ICMP);
4258 // Given:
4259 //
4260 // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4261 // %cmp = G_ICMP ne %x, 0
4262 //
4263 // Or:
4264 //
4265 // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
4266 // %cmp = G_ICMP eq %x, 1
4267 //
4268 // We can replace %cmp with %x assuming true is 1 on the target.
4269 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
4270 if (!CmpInst::isEquality(Pred))
4271 return false;
4272 Register Dst = MI.getOperand(0).getReg();
4273 LLT DstTy = MRI.getType(Dst);
4274 if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
4275 /* IsFP = */ false) != 1)
4276 return false;
4277 int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
4278 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
4279 return false;
4280 Register LHS = MI.getOperand(2).getReg();
4281 auto KnownLHS = KB->getKnownBits(LHS);
4282 if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
4283 return false;
4284 // Make sure replacing Dst with the LHS is a legal operation.
4285 LLT LHSTy = MRI.getType(LHS);
4286 unsigned LHSSize = LHSTy.getSizeInBits();
4287 unsigned DstSize = DstTy.getSizeInBits();
4288 unsigned Op = TargetOpcode::COPY;
4289 if (DstSize != LHSSize)
4290 Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
4291 if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
4292 return false;
4293 MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
4294 return true;
4295}
4296
4297// Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
4298bool CombinerHelper::matchAndOrDisjointMask(
4299 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4300 assert(MI.getOpcode() == TargetOpcode::G_AND);
4301
4302 // Ignore vector types to simplify matching the two constants.
4303 // TODO: do this for vectors and scalars via a demanded bits analysis.
4304 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
4305 if (Ty.isVector())
4306 return false;
4307
4308 Register Src;
4309 Register AndMaskReg;
4310 int64_t AndMaskBits;
4311 int64_t OrMaskBits;
4312 if (!mi_match(MI, MRI,
4313 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
4314 m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
4315 return false;
4316
4317 // Check if OrMask could turn on any bits in Src.
4318 if (AndMaskBits & OrMaskBits)
4319 return false;
4320
4321 MatchInfo = [=, &MI](MachineIRBuilder &B) {
4322 Observer.changingInstr(MI);
4323 // Canonicalize the result to have the constant on the RHS.
4324 if (MI.getOperand(1).getReg() == AndMaskReg)
4325 MI.getOperand(2).setReg(AndMaskReg);
4326 MI.getOperand(1).setReg(Src);
4327 Observer.changedInstr(MI);
4328 };
4329 return true;
4330}
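// Worked example (illustrative, names made up): with
//   %o:_(s32) = G_OR %x, 0xF0
//   %d:_(s32) = G_AND %o, 0x0F
// the masks are disjoint (0xF0 & 0x0F == 0), so the OR cannot set any bit that
// survives the AND, and the G_AND is rewritten in place to
//   %d:_(s32) = G_AND %x, 0x0F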
4331
4332/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
4333bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
4334 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4335 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
4336 Register Dst = MI.getOperand(0).getReg();
4337 Register Src = MI.getOperand(1).getReg();
4338 LLT Ty = MRI.getType(Src);
4339 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4340 if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
4341 return false;
4342 int64_t Width = MI.getOperand(2).getImm();
4343 Register ShiftSrc;
4344 int64_t ShiftImm;
4345 if (!mi_match(
4346 Src, MRI,
4347 m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
4348 m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
4349 return false;
4350 if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
4351 return false;
4352
4353 MatchInfo = [=](MachineIRBuilder &B) {
4354 auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
4355 auto Cst2 = B.buildConstant(ExtractTy, Width);
4356 B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
4357 };
4358 return true;
4359}
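// Illustrative example (names made up): with a single-use shift feeding the
// sext_inreg,
//   %sh:_(s32) = G_ASHR %x, 4
//   %d:_(s32)  = G_SEXT_INREG %sh, 8
// ShiftImm == 4 and Width == 8 satisfy ShiftImm + Width <= 32, so MatchInfo
// builds the equivalent signed bitfield extract
//   %d:_(s32) = G_SBFX %x, 4, 8       ; lsb 4, width 8
// i.e. sign-extend the 8 bits of %x that start at bit 4.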
4360
4361/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
4362bool CombinerHelper::matchBitfieldExtractFromAnd(
4363 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4364 assert(MI.getOpcode() == TargetOpcode::G_AND);
4365 Register Dst = MI.getOperand(0).getReg();
4366 LLT Ty = MRI.getType(Dst);
4367 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
4368 if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
4369 return false;