LLVM 23.0.0git
CombinerHelper.h
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===--------------------------------------------------------------------===//
8/// \file
 9/// This contains common combine transformations that may be used in a combine
10/// pass,or by the target elsewhere.
 11/// pass, or by the target elsewhere.
 12/// Targets can pick individual opcode transformations from the helper or use
12/// tryCombine which invokes all transformations. All of the transformations
13/// return true if the MachineInstruction changed and false otherwise.
14///
15//===--------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
18#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
19
20#include "llvm/ADT/DenseMap.h"
26#include "llvm/IR/InstrTypes.h"
27#include <functional>
28
29namespace llvm {
30
32class APInt;
33class ConstantFP;
34class GPtrAdd;
35class GZExtLoad;
39class MachineInstr;
40class MachineOperand;
43class LegalizerInfo;
44struct LegalityQuery;
45class RegisterBank;
47class TargetInstrInfo;
48class TargetLowering;
50
52 LLT Ty; // The result type of the extend.
53 unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
55};
56
61 bool RematOffset = false; // True if Offset is a constant that needs to be
62 // rematerialized before the new load/store.
63 bool IsPre = false;
64};
65
67 int64_t Imm;
70 unsigned Flags;
71};
72
75 int64_t Imm;
76};
77
84
93
/// Callback type used by match functions to hand an "apply" step back to the
/// combiner: it receives a MachineIRBuilder positioned for the rewrite and
/// emits the replacement instructions.
94using BuildFnTy = std::function<void(MachineIRBuilder &)>;
95
97 SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
99 unsigned Opcode = 0; /// The opcode for the produced instruction.
100 OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
104};
105
107 /// Describes instructions to be built during a combine.
111 std::initializer_list<InstructionBuildSteps> InstrsToBuild)
113};
114
116protected:
127
128public:
130 bool IsPreLegalize, GISelValueTracking *VT = nullptr,
131 MachineDominatorTree *MDT = nullptr,
132 const LegalizerInfo *LI = nullptr);
133
135
137 return Builder;
138 }
139
140 const TargetInstrInfo &getTII() const { return *TII; }
141
142 const TargetRegisterInfo &getTRI() const { return *TRI; }
143
144 const RegisterBankInfo &getRBI() const { return *RBI; }
145
146 const TargetLowering &getTargetLowering() const;
147
148 const MachineFunction &getMachineFunction() const;
149
150 const DataLayout &getDataLayout() const;
151
152 LLVMContext &getContext() const;
153
154 /// \returns true if the combiner is running pre-legalization.
155 bool isPreLegalize() const;
156
157 /// \returns true if \p Query is legal on the target.
158 bool isLegal(const LegalityQuery &Query) const;
159
160 /// \return true if the combine is running prior to legalization, or if \p
161 /// Query is legal on the target.
162 bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
163
164 /// \return true if \p Query is legal on the target, or if \p Query will
165 /// perform WidenScalar action on the target.
166 bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const;
167
168 /// \return true if \p Query is legal on the target, or if \p Query will
169 /// perform a FewerElements action on the target.
170 bool isLegalOrHasFewerElements(const LegalityQuery &Query) const;
171
172 /// \return true if the combine is running prior to legalization, or if \p Ty
173 /// is a legal integer constant type on the target.
174 bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;
175
 176 /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
177 void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
178
179 /// Replace a single register operand with a new register and inform the
180 /// observer of the changes.
182 Register ToReg) const;
183
184 /// Replace the opcode in instruction with a new opcode and inform the
185 /// observer of the changes.
186 void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;
187
188 /// Get the register bank of \p Reg.
189 /// If Reg has not been assigned a register, a register class,
190 /// or a register bank, then this returns nullptr.
191 ///
192 /// \pre Reg.isValid()
193 const RegisterBank *getRegBank(Register Reg) const;
194
195 /// Set the register bank of \p Reg.
196 /// Does nothing if the RegBank is null.
197 /// This is the counterpart to getRegBank.
198 void setRegBank(Register Reg, const RegisterBank *RegBank) const;
199
200 /// If \p MI is COPY, try to combine it.
201 /// Returns true if MI changed.
202 bool tryCombineCopy(MachineInstr &MI) const;
203 bool matchCombineCopy(MachineInstr &MI) const;
204 void applyCombineCopy(MachineInstr &MI) const;
205
206 /// Returns true if \p DefMI precedes \p UseMI or they are the same
207 /// instruction. Both must be in the same basic block.
208 bool isPredecessor(const MachineInstr &DefMI,
209 const MachineInstr &UseMI) const;
210
211 /// Returns true if \p DefMI dominates \p UseMI. By definition an
212 /// instruction dominates itself.
213 ///
214 /// If we haven't been provided with a MachineDominatorTree during
215 /// construction, this function returns a conservative result that tracks just
216 /// a single basic block.
217 bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const;
218
219 /// If \p MI is extend that consumes the result of a load, try to combine it.
220 /// Returns true if MI changed.
223 PreferredTuple &MatchInfo) const;
225 PreferredTuple &MatchInfo) const;
226
227 /// Match (and (load x), mask) -> zextload x
229 BuildFnTy &MatchInfo) const;
230
231 /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
232 /// load.
234 BuildFnTy &MatchInfo) const;
235
237 IndexedLoadStoreMatchInfo &MatchInfo) const;
239 IndexedLoadStoreMatchInfo &MatchInfo) const;
240
243
244 /// Match sext_inreg(load p), imm -> sextload p
246 std::tuple<Register, unsigned> &MatchInfo) const;
248 std::tuple<Register, unsigned> &MatchInfo) const;
249
250 /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
251 /// when their source operands are identical.
252 bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
253 void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
254
255 /// If a brcond's true block is not the fallthrough, make it so by inverting
256 /// the condition and swapping operands.
258 MachineInstr *&BrCond) const;
260 MachineInstr *&BrCond) const;
261
262 /// If \p MI is G_CONCAT_VECTORS, try to combine it.
263 /// Returns true if MI changed.
264 /// Right now, we support:
265 /// - concat_vector(undef, undef) => undef
266 /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
267 /// build_vector(A, B, C, D)
268 /// ==========================================================
269 /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
270 /// can be flattened into a build_vector.
271 /// In the first case \p Ops will be empty
272 /// In the second case \p Ops will contain the operands
273 /// needed to produce the flattened build_vector.
274 ///
275 /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
278 /// Replace \p MI with a flattened build_vector with \p Ops
279 /// or an implicit_def if \p Ops is empty.
282
285 /// Replace \p MI with a flattened build_vector with \p Ops
286 /// or an implicit_def if \p Ops is empty.
289
290 /// Replace \p MI with a build_vector.
292
293 /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
294 /// Returns true if MI changed.
295 ///
296 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
298 /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
299 /// concat_vectors.
300 /// \p Ops will contain the operands needed to produce the flattened
301 /// concat_vectors.
302 ///
303 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
306 /// Replace \p MI with a concat_vectors with \p Ops.
308 ArrayRef<Register> Ops) const;
309
310 /// Optimize memcpy intrinsics et al, e.g. constant len calls.
 311 /// \p MaxLen if non-zero specifies the max length of a mem libcall to inline.
312 ///
313 /// For example (pre-indexed):
314 ///
315 /// $addr = G_PTR_ADD $base, $offset
316 /// [...]
317 /// $val = G_LOAD $addr
318 /// [...]
319 /// $whatever = COPY $addr
320 ///
321 /// -->
322 ///
323 /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
324 /// [...]
325 /// $whatever = COPY $addr
326 ///
327 /// or (post-indexed):
328 ///
329 /// G_STORE $val, $base
330 /// [...]
331 /// $addr = G_PTR_ADD $base, $offset
332 /// [...]
333 /// $whatever = COPY $addr
334 ///
335 /// -->
336 ///
337 /// $addr = G_INDEXED_STORE $val, $base, $offset
338 /// [...]
339 /// $whatever = COPY $addr
340 bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0) const;
341
342 bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
343 void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
344
345 /// Fold (shift (shift base, x), y) -> (shift base (x+y))
346 bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
347 void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
348
349 /// If we have a shift-by-constant of a bitwise logic op that itself has a
350 /// shift-by-constant operand with identical opcode, we may be able to convert
351 /// that into 2 independent shifts followed by the logic op.
353 ShiftOfShiftedLogic &MatchInfo) const;
355 ShiftOfShiftedLogic &MatchInfo) const;
356
357 bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
358
359 /// Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
361 MachineInstr &ShiftMI) const;
363 LshrOfTruncOfLshr &MatchInfo) const;
364
365 /// Transform a multiply by a power-of-2 value to a left shift.
366 bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
367 void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
368
369 // Transform a G_SUB with constant on the RHS to G_ADD.
370 bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
371
372 // Transform a G_SHL with an extended source into a narrower shift if
373 // possible.
375 RegisterImmPair &MatchData) const;
377 const RegisterImmPair &MatchData) const;
378
379 /// Fold away a merge of an unmerge of the corresponding values.
380 bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const;
381
382 /// Reduce a shift by a constant to an unmerge and a shift on a half sized
383 /// type. This will not produce a shift smaller than \p TargetShiftSize.
384 bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
385 unsigned &ShiftVal) const;
387 const unsigned &ShiftVal) const;
389 unsigned TargetShiftAmount) const;
390
391 /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
393 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
395 MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
396
397 /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
399 SmallVectorImpl<APInt> &Csts) const;
401 SmallVectorImpl<APInt> &Csts) const;
402
403 /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
406 std::function<void(MachineIRBuilder &)> &MatchInfo) const;
407
408 /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
411
412 /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
415
416 /// Transform fp_instr(cst) to constant result of the fp operation.
418 const ConstantFP *Cst) const;
419
420 /// Constant fold a unary integer op (G_CTLZ, G_CTTZ, G_CTPOP and their
421 /// _ZERO_POISON variants, G_ABS, G_BSWAP, G_BITREVERSE) when the operand is
422 /// a scalar constant or a G_BUILD_VECTOR of constants.
424 BuildFnTy &MatchInfo) const;
425
426 /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
429
430 /// Transform PtrToInt(IntToPtr(x)) to x.
432
433 /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
434 /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
435 bool
437 std::pair<Register, bool> &PtrRegAndCommute) const;
438 void
440 std::pair<Register, bool> &PtrRegAndCommute) const;
441
442 // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
445
446 /// Transform anyext(trunc(x)) to x.
448
449 /// Transform zext(trunc(x)) to x.
451
452 /// Transform trunc (shl x, K) to shl (trunc x), K
453 /// if K < VT.getScalarSizeInBits().
454 ///
455 /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
456 /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
457 /// MidVT is obtained by finding a legal type between the trunc's src and dst
458 /// types.
459 bool
461 std::pair<MachineInstr *, LLT> &MatchInfo) const;
462 void
464 std::pair<MachineInstr *, LLT> &MatchInfo) const;
465
466 /// Return true if any explicit use operand on \p MI is defined by a
467 /// G_IMPLICIT_DEF.
469
470 /// Return true if all register explicit use operands on \p MI are defined by
471 /// a G_IMPLICIT_DEF.
473
474 /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
476
477 /// Return true if a G_STORE instruction \p MI is storing an undef value.
478 bool matchUndefStore(MachineInstr &MI) const;
479
480 /// Return true if a G_SELECT instruction \p MI has an undef comparison.
482
483 /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
485
486 /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
487 /// true, \p OpIdx will store the operand index of the known selected value.
488 bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const;
489
490 /// Replace an instruction with a G_FCONSTANT with value \p C.
491 void replaceInstWithFConstant(MachineInstr &MI, double C) const;
492
493 /// Replace an instruction with an G_FCONSTANT with value \p CFP.
495
496 /// Replace an instruction with a G_CONSTANT with value \p C.
497 void replaceInstWithConstant(MachineInstr &MI, int64_t C) const;
498
499 /// Replace an instruction with a G_CONSTANT with value \p C.
501
502 /// Replace an instruction with a G_IMPLICIT_DEF.
504
505 /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
507
508 /// Delete \p MI and replace all of its uses with \p Replacement.
510 Register Replacement) const;
511
512 /// @brief Replaces the shift amount in \p MI with ShiftAmt % BW
513 /// @param MI
515
 516 /// Return true if \p MOP1 and \p MOP2 are register operands defined by
 517 /// equivalent instructions.
518 bool matchEqualDefs(const MachineOperand &MOP1,
519 const MachineOperand &MOP2) const;
520
521 /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value equal to
522 /// \p C.
523 bool matchConstantOp(const MachineOperand &MOP, int64_t C) const;
524
525 /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value exactly
526 /// equal to \p C.
527 bool matchConstantFPOp(const MachineOperand &MOP, double C) const;
528
529 /// @brief Checks if constant at \p ConstIdx is larger than \p MI 's bitwidth
530 /// @param ConstIdx Index of the constant
531 bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const;
532
533 /// Optimize (cond ? x : x) -> x
535
536 /// Optimize (x op x) -> x
537 bool matchBinOpSameVal(MachineInstr &MI) const;
538
539 /// Check if operand \p OpIdx is undef.
540 bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
541
542 /// Check if operand \p OpIdx is known to be a power of 2.
544 unsigned OpIdx) const;
545
546 /// Erase \p MI
547 void eraseInst(MachineInstr &MI) const;
548
549 /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
551 std::tuple<Register, Register> &MatchInfo) const;
553 std::tuple<Register, Register> &MatchInfo) const;
554
555 /// Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`
556 bool matchBinopWithNeg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
557
558 /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
560 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const;
561
562 /// Replace \p MI with a series of instructions described in \p MatchInfo.
564 InstructionStepsMatchInfo &MatchInfo) const;
565
566 /// Match ashr (shl x, C), C -> sext_inreg (C)
568 std::tuple<Register, int64_t> &MatchInfo) const;
570 std::tuple<Register, int64_t> &MatchInfo) const;
571
572 /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
573 bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
574
575 /// \return true if \p MI is a G_AND instruction whose operands are x and y
576 /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
577 ///
578 /// \param [in] MI - The G_AND instruction.
579 /// \param [out] Replacement - A register the G_AND should be replaced with on
580 /// success.
581 bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const;
582
583 /// \return true if \p MI is a G_OR instruction whose operands are x and y
584 /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
585 /// value.)
586 ///
587 /// \param [in] MI - The G_OR instruction.
588 /// \param [out] Replacement - A register the G_OR should be replaced with on
589 /// success.
590 bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const;
591
592 /// \return true if \p MI is a G_SEXT_INREG that can be erased.
594
595 /// Combine inverting a result of a compare into the opposite cond code.
597 SmallVectorImpl<Register> &RegsToNegate) const;
599 SmallVectorImpl<Register> &RegsToNegate) const;
600
601 /// Fold (xor (and x, y), y) -> (and (not x), y)
602 ///{
604 std::pair<Register, Register> &MatchInfo) const;
606 std::pair<Register, Register> &MatchInfo) const;
607 ///}
608
609 /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
610 bool matchPtrAddZero(MachineInstr &MI) const;
611 void applyPtrAddZero(MachineInstr &MI) const;
612
613 /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
615
616 /// Push a binary operator through a select on constants.
617 ///
618 /// binop (select cond, K0, K1), K2 ->
619 /// select cond, (binop K0, K2), (binop K1, K2)
620 bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const;
622 const unsigned &SelectOpNo) const;
623
625 SmallVectorImpl<Register> &MatchInfo) const;
626
628 SmallVectorImpl<Register> &MatchInfo) const;
629
630 /// Match expression trees of the form
631 ///
632 /// \code
633 /// sN *a = ...
634 /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
635 /// \endcode
636 ///
637 /// And check if the tree can be replaced with a M-bit load + possibly a
638 /// bswap.
639 bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const;
640
643
646
649 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
652 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
653
654 /// Use a function which takes in a MachineIRBuilder to perform a combine.
655 /// By default, it erases the instruction \p MI from the function.
656 void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const;
657 /// Use a function which takes in a MachineIRBuilder to perform a combine.
658 /// This variant does not erase \p MI after calling the build function.
659 void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const;
660
661 bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants,
662 BuildFnTy &MatchInfo) const;
667
669 Register &UnmergeSrc) const;
672 Register &UnmergeSrc) const;
673
674 bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
675 void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
676
677 /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
678 /// or false constant based off of KnownBits information.
680 int64_t &MatchInfo) const;
681
682 /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
683 /// KnownBits information.
684 bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const;
685
686 /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
687 bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
688
690 BuildFnTy &MatchInfo) const;
691 /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
693 BuildFnTy &MatchInfo) const;
694
695 /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
697 BuildFnTy &MatchInfo) const;
698
699 /// Match: shr (and x, n), k -> ubfx x, pos, width
701 BuildFnTy &MatchInfo) const;
702
703 // Helpers for reassociation:
705 BuildFnTy &MatchInfo) const;
708 BuildFnTy &MatchInfo) const;
711 BuildFnTy &MatchInfo) const;
712 /// Reassociate pointer calculations with G_ADD involved, to allow better
713 /// addressing mode usage.
714 bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
715
 716 /// Try to reassociate operands of a commutative binop.
717 bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
718 Register Op1, BuildFnTy &MatchInfo) const;
719 /// Reassociate commutative binary operations like G_ADD.
720 bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const;
721
722 /// Do constant folding when opportunities are exposed after MIR building.
723 bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const;
724
725 /// Do constant folding when opportunities are exposed after MIR building.
726 bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const;
727
728 /// Do constant FP folding when opportunities are exposed after MIR building.
729 bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const;
730
731 /// Constant fold G_FMA/G_FMAD.
732 bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const;
733
734 /// \returns true if it is possible to narrow the width of a scalar binop
735 /// feeding a G_AND instruction \p MI.
736 bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
737
738 /// Given an G_UDIV \p MI or G_UREM \p MI expressing a divide by constant,
739 /// return an expression that implements it by multiplying by a magic number.
740 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
742 /// Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
745
746 /// Given an G_SDIV \p MI or G_SREM \p MI expressing a signed divide by
747 /// constant, return an expression that implements it by multiplying by a
748 /// magic number. Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's
749 /// Guide".
751 /// Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
754
 755 /// Given a G_SDIV \p MI expressing a signed divide by a pow2 constant,
 756 /// return expressions that implement it by shifting.
757 bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const;
758 void applySDivByPow2(MachineInstr &MI) const;
 759 /// Given a G_UDIV \p MI expressing an unsigned divide by a pow2 constant,
 760 /// return expressions that implement it by shifting.
761 void applyUDivByPow2(MachineInstr &MI) const;
762
763 // G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
764 bool matchUMulHToLShr(MachineInstr &MI) const;
765 void applyUMulHToLShr(MachineInstr &MI) const;
766
767 // Combine trunc(smin(smax(x, C1), C2)) -> truncssat_s(x)
768 // or trunc(smax(smin(x, C2), C1)) -> truncssat_s(x).
769 bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
770 void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const;
771
772 // Combine trunc(smin(smax(x, 0), C)) -> truncssat_u(x)
773 // or trunc(smax(smin(x, C), 0)) -> truncssat_u(x)
774 // or trunc(umin(smax(x, 0), C)) -> truncssat_u(x)
775 bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
776 void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const;
777
778 // Combine trunc(umin(x, C)) -> truncusat_u(x).
779 bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const;
780
781 // Combine truncusat_u(fptoui(x)) -> fptoui_sat(x)
783
784 /// Try to transform \p MI by using all of the above
785 /// combine functions. Returns true if changed.
787
788 /// Emit loads and stores that perform the given memcpy.
789 /// Assumes \p MI is a G_MEMCPY_INLINE
790 /// TODO: implement dynamically sized inline memcpy,
791 /// and rename: s/bool tryEmit/void emit/
793
794 /// Match:
795 /// (G_UMULO x, 2) -> (G_UADDO x, x)
796 /// (G_SMULO x, 2) -> (G_SADDO x, x)
797 bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const;
798
799 /// Match:
800 /// (G_*MULO x, 0) -> 0 + no carry out
801 bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const;
802
803 /// Match:
804 /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
805 /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
806 bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const;
807
808 /// Transform (fadd x, fneg(y)) -> (fsub x, y)
809 /// (fadd fneg(x), y) -> (fsub y, x)
810 /// (fsub x, fneg(y)) -> (fadd x, y)
811 /// (fmul fneg(x), fneg(y)) -> (fmul x, y)
812 /// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
813 /// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
814 /// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
815 bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const;
816
817 bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
818 void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
819
820 bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
821 bool &HasFMAD, bool &Aggressive,
822 bool CanReassociate = false) const;
823
824 /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
825 /// (fadd (fmul x, y), z) -> (fmad x, y, z)
827 BuildFnTy &MatchInfo) const;
828
829 /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
830 /// (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
832 BuildFnTy &MatchInfo) const;
833
834 /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
835 /// (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
837 BuildFnTy &MatchInfo) const;
838
839 // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
840 // -> (fma x, y, (fma (fpext u), (fpext v), z))
841 // (fadd (fmad x, y, (fpext (fmul u, v))), z)
842 // -> (fmad x, y, (fmad (fpext u), (fpext v), z))
843 bool
845 BuildFnTy &MatchInfo) const;
846
847 /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
848 /// (fsub (fmul x, y), z) -> (fmad x, y, -z)
850 BuildFnTy &MatchInfo) const;
851
852 /// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
853 /// (fsub (fneg (fmul, x, y)), z) -> (fmad (fneg x), y, (fneg z))
855 BuildFnTy &MatchInfo) const;
856
857 /// Transform (fsub (fpext (fmul x, y)), z)
858 /// -> (fma (fpext x), (fpext y), (fneg z))
859 /// (fsub (fpext (fmul x, y)), z)
860 /// -> (fmad (fpext x), (fpext y), (fneg z))
862 BuildFnTy &MatchInfo) const;
863
864 /// Transform (fsub (fpext (fneg (fmul x, y))), z)
865 /// -> (fneg (fma (fpext x), (fpext y), z))
866 /// (fsub (fpext (fneg (fmul x, y))), z)
867 /// -> (fneg (fmad (fpext x), (fpext y), z))
869 BuildFnTy &MatchInfo) const;
870
871 bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const;
872
874 SmallVector<MachineInstr *> &MatchInfo) const;
876
877 /// Transform G_ADD(x, G_SUB(y, x)) to y.
878 /// Transform G_ADD(G_SUB(y, x), x) to y.
879 bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const;
880
882 Register &MatchInfo) const;
883 bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const;
885 Register &MatchInfo) const;
886
887 /// Transform:
888 /// (x + y) - y -> x
889 /// (x + y) - x -> y
890 /// x - (y + x) -> 0 - y
891 /// x - (x + z) -> 0 - z
892 bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
893
894 /// \returns true if it is possible to simplify a select instruction \p MI
895 /// to a min/max instruction of some sort.
897 BuildFnTy &MatchInfo) const;
898
899 /// Transform:
900 /// (X + Y) == X -> Y == 0
901 /// (X - Y) == X -> Y == 0
902 /// (X ^ Y) == X -> Y == 0
903 /// (X + Y) != X -> Y != 0
904 /// (X - Y) != X -> Y != 0
905 /// (X ^ Y) != X -> Y != 0
907 BuildFnTy &MatchInfo) const;
908
909 /// Match shifts greater or equal to the range (the bitwidth of the result
910 /// datatype, or the effective bitwidth of the source value).
912 std::optional<int64_t> &MatchInfo) const;
913
914 /// Match constant LHS ops that should be commuted.
916
917 /// Combine sext of trunc.
918 bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
919
920 /// Combine zext of trunc.
921 bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
922
923 /// Combine zext nneg to sext.
924 bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
925
926 /// Match constant LHS FP ops that should be commuted.
928
929 // Given a binop \p MI, commute operands 1 and 2.
931
932 /// Combine select to integer min/max.
933 bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
934
 935 /// Transform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
936 bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const;
937
938 /// Combine selects.
939 bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const;
940
941 /// Combine ands.
942 bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
943
944 /// Combine ors.
945 bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const;
946
947 /// trunc (binop X, C) --> binop (trunc X, trunc C).
948 bool matchNarrowBinop(const MachineInstr &TruncMI,
949 const MachineInstr &BinopMI,
950 BuildFnTy &MatchInfo) const;
951
952 bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const;
953
954 /// Combine addos.
955 bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const;
956
957 /// Combine extract vector element.
958 bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const;
959
960 /// Combine extract vector element with a build vector on the vector register.
962 const MachineInstr &MI2,
963 BuildFnTy &MatchInfo) const;
964
965 /// Combine extract vector element with a build vector trunc on the vector
966 /// register.
967 bool
969 BuildFnTy &MatchInfo) const;
970
971 /// Combine extract vector element with a shuffle vector on the vector
972 /// register.
974 const MachineInstr &MI2,
975 BuildFnTy &MatchInfo) const;
976
977 /// Combine extract vector element with a insert vector element on the vector
978 /// register and different indices.
979 bool
981 BuildFnTy &MatchInfo) const;
982
983 /// Remove references to rhs if it is undef
984 bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const;
985
986 /// Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not
987 /// reference a.
988 bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
989
990 /// Use a function which takes in a MachineIRBuilder to perform a combine.
991 /// By default, it erases the instruction def'd on \p MO from the function.
992 void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
993
994 /// Match FPOWI if it's safe to extend it into a series of multiplications.
995 bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const;
996
997 /// Expands FPOWI into a series of multiplications and a division if the
998 /// exponent is negative.
999 void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const;
1000
1001 /// Combine insert vector element OOB.
1003 BuildFnTy &MatchInfo) const;
1004
1006 BuildFnTy &MatchInfo) const;
1007
1008 bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1009
1010 bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1011
1012 bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1013
1014 bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
1015
1016 /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
1017 bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
1018 BuildFnTy &MatchInfo) const;
1019
1020 bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
1021 BuildFnTy &MatchInfo) const;
1023 BuildFnTy &MatchInfo) const;
1024
1026 BuildFnTy &MatchInfo) const;
1027
1029 BuildFnTy &MatchInfo) const;
1030
1032 BuildFnTy &MatchInfo) const;
1033
1034 // fold ((A-C1)+C2) -> (A+(C2-C1))
1036 BuildFnTy &MatchInfo) const;
1037
1038 bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI,
1039 BuildFnTy &MatchInfo) const;
1040
1041 bool matchCastOfBuildVector(const MachineInstr &CastMI,
1042 const MachineInstr &BVMI,
1043 BuildFnTy &MatchInfo) const;
1044
1046 BuildFnTy &MatchInfo) const;
1048 BuildFnTy &MatchInfo) const;
1049
1050 // unmerge_values(anyext(build vector)) -> build vector(anyext)
1052 BuildFnTy &MatchInfo) const;
1053
1054 // merge_values(_, undef) -> anyext
1055 bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1056
1057 // merge_values(_, zero) -> zext
1058 bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1059
1060 // overflow sub
1061 bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
1062
1063 // (sext_inreg (sext_inreg x, K0), K1)
1065 BuildFnTy &MatchInfo) const;
1066
1067 // (ctlz (xor x, (sra x, bitwidth-1))) -> (add (ctls x), 1) or
1068 // (ctlz (or (shl (xor x, (sra x, bitwidth-1)), 1), 1) -> (ctls x)
1069 bool matchCtls(MachineInstr &CtlzMI, BuildFnTy &MatchInfo) const;
1070
1071private:
1072 /// Checks for legality of an indexed variant of \p LdSt.
1073 bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
1074
1075 /// Helper function for matchBinopWithNeg: tries to match one commuted form
1076 /// of `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`.
1077 bool matchBinopWithNegInner(Register MInner, Register Other, unsigned RootOpc,
1078 Register Dst, LLT Ty, BuildFnTy &MatchInfo) const;
1079 /// Given a non-indexed load or store instruction \p MI, find an offset that
1080 /// can be usefully and legally folded into it as a post-indexing operation.
1081 ///
1082 /// \returns true if a candidate is found.
1083 bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
1084 Register &Offset, bool &RematOffset) const;
1085
1086 /// Given a non-indexed load or store instruction \p MI, find an offset that
1087 /// can be usefully and legally folded into it as a pre-indexing operation.
1088 ///
1089 /// \returns true if a candidate is found.
1090 bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
1091 Register &Offset) const;
1092
1093 /// Helper function for matchLoadOrCombine. Searches for Registers
1094 /// which may have been produced by a load instruction + some arithmetic.
1095 ///
1096 /// \param [in] Root - The search root.
1097 ///
1098 /// \returns The Registers found during the search.
1099 std::optional<SmallVector<Register, 8>>
1100 findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
1101
1102 /// Helper function for matchLoadOrCombine.
1103 ///
1104 /// Checks if every register in \p RegsToVisit is defined by a load
1105 /// instruction + some arithmetic.
1106 ///
1107 /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
1108 /// at to the index of the load.
1109 /// \param [in] MemSizeInBits - The number of bits each load should produce.
1110 ///
1111 /// \returns On success, a 3-tuple containing lowest-index load found, the
1112 /// lowest index, and the last load in the sequence.
1113 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
1114 findLoadOffsetsForLoadOrCombine(
1116 const SmallVector<Register, 8> &RegsToVisit,
1117 const unsigned MemSizeInBits) const;
1118
1119 /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
1120 /// a re-association of its operands would break an existing legal addressing
1121 /// mode that the address computation currently represents.
1122 bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd) const;
1123
1124 /// Behavior when a floating point min/max is given one NaN and one
1125 /// non-NaN as input.
1126 enum class SelectPatternNaNBehaviour {
1127 NOT_APPLICABLE = 0, /// NaN behavior not applicable.
1128 RETURNS_NAN, /// Given one NaN input, returns the NaN.
1129 RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
1130 RETURNS_ANY /// Given one NaN input, can return either (or both operands are
1131 /// known non-NaN.)
1132 };
1133
1134 /// \returns which of \p LHS and \p RHS would be the result of a non-equality
1135 /// floating point comparison where one of \p LHS and \p RHS may be NaN.
1136 ///
1137 /// If both \p LHS and \p RHS may be NaN, returns
1138 /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
1139 SelectPatternNaNBehaviour
1140 computeRetValAgainstNaN(Register LHS, Register RHS,
1141 bool IsOrderedComparison) const;
1142
1143 /// Determines the floating point min/max opcode which should be used for
1144 /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
1145 ///
1146 /// \returns 0 if this G_SELECT should not be combined to a floating point
1147 /// min or max. If it should be combined, returns one of
1148 ///
1149 /// * G_FMAXNUM
1150 /// * G_FMAXIMUM
1151 /// * G_FMINNUM
1152 /// * G_FMINIMUM
1153 ///
1154 /// Helper function for matchFPSelectToMinMax.
1155 unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
1156 SelectPatternNaNBehaviour VsNaNRetVal) const;
1157
1158 /// Handle floating point cases for matchSimplifySelectToMinMax.
1159 ///
1160 /// E.g.
1161 ///
1162 /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
1163 /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
1164 bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
1165 Register FalseVal, BuildFnTy &MatchInfo) const;
1166
1167 /// Try to fold selects to logical operations.
1168 bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo) const;
1169
1170 bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo) const;
1171
1172 bool isOneOrOneSplat(Register Src, bool AllowUndefs) const;
1173 bool isZeroOrZeroSplat(Register Src, bool AllowUndefs) const;
1174 bool isConstantSplatVector(Register Src, int64_t SplatValue,
1175 bool AllowUndefs) const;
1176 bool isConstantOrConstantVectorI(Register Src) const;
1177
1178 std::optional<APInt> getConstantOrConstantSplatVector(Register Src) const;
1179
1180 /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
1181 /// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
1182 /// into a single comparison using range-based reasoning.
1183 bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
1184 BuildFnTy &MatchInfo) const;
1185
1186 // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
1187 bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const;
1188
1189 bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;
1190
1191 bool constantFoldICmp(const GICmp &ICmp, const GIConstant &LHSCst,
1192 const GIConstant &RHSCst, BuildFnTy &MatchInfo) const;
1193 bool constantFoldFCmp(const GFCmp &FCmp, const GFConstant &LHSCst,
1194 const GFConstant &RHSCst, BuildFnTy &MatchInfo) const;
1195};
1196} // namespace llvm
1197
1198#endif
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Register Bank Select
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file defines the DenseMap class.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isConstantSplatVector(SDValue N, APInt &SplatValue, unsigned MinSizeInBits)
Implement a low-level type suitable for MachineInstr level instruction selection.
Register Reg
MachineInstr unsigned OpIdx
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition APInt.h:78
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRepeatedFPDivisor(MachineInstr &MI, SmallVector< MachineInstr * > &MatchInfo) const
bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match expression trees of the form.
bool tryCombine(MachineInstr &MI) const
Try to transform MI by using all of the above combine functions.
const RegisterBank * getRegBank(Register Reg) const
Get the register bank of Reg.
void applyPtrAddZero(MachineInstr &MI) const
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2) const
Return true if MOP1 and MOP2 are register operands are defined by equivalent instructions.
void applyUDivOrURemByConst(MachineInstr &MI) const
bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCtls(MachineInstr &CtlzMI, BuildFnTy &MatchInfo) const
bool matchSelectSameVal(MachineInstr &MI) const
Optimize (cond ? x : x) -> x.
bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*ADDE x, y, 0) -> (G_*ADDO x, y) (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width.
bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
void applySimplifyURemByPow2(MachineInstr &MI) const
Combine G_UREM x, (known power of 2) to an add and bitmasking.
bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const
Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0.
bool matchPtrAddZero(MachineInstr &MI) const
}
const TargetInstrInfo * TII
void applyCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void applyXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally, bool &HasFMAD, bool &Aggressive, bool CanReassociate=false) const
bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
void applyCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
bool matchShiftsTooBig(MachineInstr &MI, std::optional< int64_t > &MatchInfo) const
Match shifts greater or equal to the range (the bitwidth of the result datatype, or the effective bit...
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) (fadd (fpext (fmul x,...
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
void applyCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
Replace MI with a flattened build_vector with Ops or an implicit_def if Ops is empty.
void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement) const
Delete MI and replace all of its uses with Replacement.
void applyCombineShuffleToBuildVector(MachineInstr &MI) const
Replace MI with a build_vector.
bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext of trunc.
bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed load.
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const
MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp, Register ToReg) const
Replace a single register operand with a new register and inform the observer of the changes.
bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate commutative binary operations like G_ADD.
bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector trunc on the vector register.
void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCommuteConstantToRHS(MachineInstr &MI) const
Match constant LHS ops that should be commuted.
const DataLayout & getDataLayout() const
bool matchBinOpSameVal(MachineInstr &MI) const
Optimize (x op x) -> x.
bool matchSimplifyNegMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
Tranform (neg (min/max x, (neg x))) into (max/min x, (neg x)).
bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM when their source operands are iden...
bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine zext nneg to sext.
void applyUMulHToLShr(MachineInstr &MI) const
void applyNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
bool isLegalOrHasFewerElements(const LegalityQuery &Query) const
bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
Fold (shift (shift base, x), y) -> (shift base (x+y))
void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const
Return true if all register explicit use operands on MI are defined by a G_IMPLICIT_DEF.
bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI precedes UseMI or they are the same instruction.
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool matchTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
const TargetLowering & getTargetLowering() const
bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine extract vector element with a insert vector element on the vector register and different indi...
bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const
Remove references to rhs if it is undef.
void applyBuildInstructionSteps(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Replace MI with a series of instructions described in MatchInfo.
void applySDivByPow2(MachineInstr &MI) const
void applySimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
void applyUDivByPow2(MachineInstr &MI) const
Given an G_UDIV MI expressing an unsigned divided by a pow2 constant, return expressions that impleme...
bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ors.
bool matchLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo, MachineInstr &ShiftMI) const
Fold (lshr (trunc (lshr x, C1)), C2) -> trunc (shift x, (C1 + C2))
bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine insert vector element OOB.
bool matchSimplifyAddToSub(MachineInstr &MI, std::tuple< Register, Register > &MatchInfo) const
Return true if MI is a G_ADD which can be simplified to a G_SUB.
void replaceInstWithConstant(MachineInstr &MI, int64_t C) const
Replace an instruction with a G_CONSTANT with value C.
bool tryEmitMemcpyInline(MachineInstr &MI) const
Emit loads and stores that perform the given memcpy.
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) (fsub (fpext (fmul x,...
void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const
Checks if constant at ConstIdx is larger than MI 's bitwidth.
GISelValueTracking * getValueTracking() const
void applyCombineCopy(MachineInstr &MI) const
bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine extract vector element.
bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine sext of trunc.
bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const
Transform G_ADD(x, G_SUB(y, x)) to y.
bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData) const
bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fmul x, y), z) -> (fma x, y, -z) (fsub (fmul x, y), z) -> (fmad x,...
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) (fadd (fmad x,...
bool matchSextTruncSextLoad(MachineInstr &MI) const
bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const
Fold away a merge of an unmerge of the corresponding values.
bool matchCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineBuildUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI, Register &UnmergeSrc) const
bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const
Given an G_SDIV MI expressing a signed divided by a pow2 constant, return expressions that implements...
bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd x, fneg(y)) -> (fsub x, y) (fadd fneg(x), y) -> (fsub y, x) (fsub x,...
bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match (and (load x), mask) -> zextload x.
bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fadd (fmul x, y), z) -> (fma x, y, z) (fadd (fmul x, y), z) -> (fmad x,...
bool matchCombineCopy(MachineInstr &MI) const
bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const
bool matchXorOfAndWithSameReg(MachineInstr &MI, std::pair< Register, Register > &MatchInfo) const
Fold (xor (and x, y), y) -> (and (not x), y) {.
bool matchCombineShuffleVector(MachineInstr &MI, SmallVectorImpl< Register > &Ops) const
Check if the G_SHUFFLE_VECTOR MI can be replaced by a concat_vectors.
void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI, BuildFnTy &MatchInfo) const
Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
bool matchCombineAddP2IToPtrAdd(MachineInstr &MI, std::pair< Register, bool > &PtrRegAndCommute) const
Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y) Transform G_ADD y,...
void replaceInstWithFConstant(MachineInstr &MI, double C) const
Replace an instruction with a G_FCONSTANT with value C.
bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchFunnelShiftToRotate(MachineInstr &MI) const
Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool matchOrShiftToFunnelShift(MachineInstr &MI, bool AllowScalarConstants, BuildFnTy &MatchInfo) const
bool matchRedundantSExtInReg(MachineInstr &MI) const
void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const
Replace the opcode in instruction with a new opcode and inform the observer of the changes.
void applyFunnelShiftConstantModulo(MachineInstr &MI) const
Replaces the shift amount in MI with ShiftAmt % BW.
bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineShlOfExtend(MachineInstr &MI, const RegisterImmPair &MatchData) const
void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, GISelValueTracking *VT=nullptr, MachineDominatorTree *MDT=nullptr, const LegalizerInfo *LI=nullptr)
bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not reference a.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
Transform a multiply by a power-of-2 value to a left shift.
void applyCombineShuffleVector(MachineInstr &MI, ArrayRef< Register > Ops) const
Replace MI with a concat_vectors with Ops.
bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const
bool matchCombineUnmergeUndef(MachineInstr &MI, std::function< void(MachineIRBuilder &)> &MatchInfo) const
Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo) const
SelectOperand is the operand in binary operator MI that is the select to fold.
bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const
bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_UMULO x, 2) -> (G_UADDO x, x) (G_SMULO x, 2) -> (G_SADDO x, x)
bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector< Register > &Ops) const
void applySextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
bool tryCombineCopy(MachineInstr &MI) const
If MI is COPY, try to combine it.
bool matchTruncUSatU(MachineInstr &MI, MachineInstr &MinMI) const
const RegisterBankInfo & getRBI() const
bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI, BuildFnTy &MatchInfo) const
bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Reassociate pointer calculations with G_ADD involved, to allow better addressing mode usage.
bool matchCanonicalizeFCmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchUndefShuffleVectorMask(MachineInstr &MI) const
Return true if a G_SHUFFLE_VECTOR instruction MI has an undef mask.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const
Return true if any explicit use operand on MI is defined by a G_IMPLICIT_DEF.
bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const
Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const
const TargetRegisterInfo & getTRI() const
bool matchShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
If we have a shift-by-constant of a bitwise logic op that itself has a shift-by-constant operand with...
bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is known to be a power of 2.
bool matchCombineConcatVectors(MachineInstr &MI, SmallVector< Register > &Ops) const
If MI is G_CONCAT_VECTORS, try to combine it.
bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const
Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
bool matchExtractVectorElementWithShuffleVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a shuffle vector on the vector register.
bool matchExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
LLVMContext & getContext() const
void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const
bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const
bool matchNotCmp(MachineInstr &MI, SmallVectorImpl< Register > &RegsToNegate) const
Combine inverting a result of a compare into the opposite cond code.
bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple< Register, unsigned > &MatchInfo) const
Match sext_inreg(load p), imm -> sextload p.
bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const
Combine select to integer min/max.
bool matchConstantFoldUnaryIntOp(MachineInstr &MI, BuildFnTy &MatchInfo) const
Constant fold a unary integer op (G_CTLZ, G_CTTZ, G_CTPOP and their _ZERO_POISON variants,...
void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst) const
Transform fp_instr(cst) to constant result of the fp operation.
bool isLegal(const LegalityQuery &Query) const
bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo) const
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0, Register Op1, BuildFnTy &MatchInfo) const
Try to reassociate to reassociate operands of a commutative binop.
void eraseInst(MachineInstr &MI) const
Erase MI.
bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const
Do constant FP folding when opportunities are exposed after MIR building.
void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const
bool matchUndefStore(MachineInstr &MI) const
Return true if a G_STORE instruction MI is storing an undef value.
MachineRegisterInfo & MRI
void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const
Transform PtrToInt(IntToPtr(x)) to x.
void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const
bool matchConstantFPOp(const MachineOperand &MOP, double C) const
Return true if MOP is defined by a G_FCONSTANT or splat with a value exactly equal to C.
MachineInstr * buildUDivOrURemUsingMul(MachineInstr &MI) const
Given an G_UDIV MI or G_UREM MI expressing a divide by constant, return an expression that implements...
void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const
bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const
Push a binary operator through a select on constants.
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount) const
bool tryCombineExtendingLoads(MachineInstr &MI) const
If MI is extend that consumes the result of a load, try to combine it.
bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const
bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo) const
bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: shr (and x, n), k -> ubfx x, pos, width.
void applyTruncSSatS(MachineInstr &MI, Register &MatchInfo) const
bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const
Do constant folding when opportunities are exposed after MIR building.
bool tryCombineShuffleVector(MachineInstr &MI) const
Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
void applyRotateOutOfRange(MachineInstr &MI) const
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const
Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: and (lshr x, cst), mask -> ubfx x, cst, width.
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool matchNarrowBinop(const MachineInstr &TruncMI, const MachineInstr &BinopMI, BuildFnTy &MatchInfo) const
trunc (binop X, C) --> binop (trunc X, trunc C).
bool matchUndefSelectCmp(MachineInstr &MI) const
Return true if a G_SELECT instruction MI has an undef comparison.
bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const
void replaceInstWithUndef(MachineInstr &MI) const
Replace an instruction with a G_IMPLICIT_DEF.
bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (X + Y) == X -> Y == 0 (X - Y) == X -> Y == 0 (X ^ Y) == X -> Y == 0 (X + Y) !...
bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
If a brcond's true block is not the fallthrough, make it so by inverting the condition and swapping o...
bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine addos.
void applyAshShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine selects.
bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo) const
bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const
Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const
bool matchRotateOutOfRange(MachineInstr &MI) const
void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const
Expands FPOWI into a series of multiplications and a division if the exponent is negative.
void setRegBank(Register Reg, const RegisterBank *RegBank) const
Set the register bank of Reg.
bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const
Return true if a G_SELECT instruction MI has a constant comparison.
bool matchCommuteFPConstantToRHS(MachineInstr &MI) const
Match constant LHS FP ops that should be commuted.
void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const
const TargetInstrInfo & getTII() const
bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const
bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const
void applyTruncSSatU(MachineInstr &MI, Register &MatchInfo) const
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y),...
bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const
bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const
void applyCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
bool matchConstantOp(const MachineOperand &MOP, int64_t C) const
Return true if MOP is defined by a G_CONSTANT or splat with a value equal to C.
const LegalizerInfo * LI
void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const
void applyCombineBuildUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B, Register &UnmergeSrc) const
bool matchUMulHToLShr(MachineInstr &MI) const
MachineDominatorTree * MDT
MachineIRBuilder & getBuilder() const
void applyFunnelShiftToRotate(MachineInstr &MI) const
bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applyRepeatedFPDivisor(SmallVector< MachineInstr * > &MatchInfo) const
bool matchTruncUSatUToFPTOUISat(MachineInstr &MI, MachineInstr &SrcMI) const
const RegisterBankInfo * RBI
bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const
Match: (G_*MULO x, 0) -> 0 + no carry out.
GISelValueTracking * VT
bool matchBinopWithNeg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold a bitwiseop (~b +/- c) -> a bitwiseop ~(b -/+ c)
bool matchCombineUnmergeConstant(MachineInstr &MI, SmallVectorImpl< APInt > &Csts) const
Transform G_UNMERGE Constant -> Constant1, Constant2, ...
void applyShiftOfShiftedLogic(MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const
const TargetRegisterInfo * TRI
bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const
bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const
Returns true if DefMI dominates UseMI.
GISelChangeObserver & Observer
void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const
Use a function which takes in a MachineIRBuilder to perform a combine.
bool matchCombineTruncOfShift(MachineInstr &MI, std::pair< MachineInstr *, LLT > &MatchInfo) const
Transform trunc (shl x, K) to shl (trunc x), K if K < VT.getScalarSizeInBits().
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize, unsigned &ShiftVal) const
Reduce a shift by a constant to an unmerge and a shift on a half sized type.
bool matchUDivOrURemByConst(MachineInstr &MI) const
Combine G_UDIV or G_UREM by constant into a multiply by magic constant.
bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Combine ands.
bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchRedundantSextInReg(MachineInstr &Root, MachineInstr &Other, BuildFnTy &MatchInfo) const
bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const
Constant fold G_FMA/G_FMAD.
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) (fsub (fneg (fmul,...
bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg) const
Transform zext(trunc(x)) to x.
bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const
Check if operand OpIdx is undef.
void applyLshrOfTruncOfLshr(MachineInstr &MI, LshrOfTruncOfLshr &MatchInfo) const
bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen=0) const
Optimize memcpy intrinsics et al, e.g.
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI, BuildFnTy &MatchInfo) const
void applySDivOrSRemByConst(MachineInstr &MI) const
MachineInstr * buildSDivOrSRemUsingMul(MachineInstr &MI) const
Given an G_SDIV MI or G_SREM MI expressing a signed divide by constant, return an expression that imp...
bool isLegalOrHasWidenScalar(const LegalityQuery &Query) const
bool matchCanonicalizeICmp(const MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchCastOfBuildVector(const MachineInstr &CastMI, const MachineInstr &BVMI, BuildFnTy &MatchInfo) const
bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const
Transform: (x + y) - y -> x (x + y) - x -> y x - (y + x) -> 0 - y x - (x + z) -> 0 - z.
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS, BuildFnTy &MatchInfo) const
bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const
bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const
Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0.
bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) const
Transform anyext(trunc(x)) to x.
void applyExtractAllEltsFromBuildVector(MachineInstr &MI, SmallVectorImpl< std::pair< Register, MachineInstr * > > &MatchInfo) const
MachineIRBuilder & Builder
void applyCommuteBinOpOperands(MachineInstr &MI) const
void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const
Delete MI and replace all of its uses with its OpIdx-th operand.
void applySextTruncSextLoad(MachineInstr &MI) const
const MachineFunction & getMachineFunction() const
bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI, BuildFnTy &MatchInfo) const
bool matchExtractVectorElementWithBuildVector(const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) const
Combine extract vector element with a build vector on the vector register.
bool matchSDivOrSRemByConst(MachineInstr &MI) const
Combine G_SDIV or G_SREM by constant into a multiply by magic constant.
void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond) const
void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal) const
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI, BuildFnTy &MatchInfo) const
bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const
Match FPOWI if it's safe to extend it into a series of multiplications.
void applyCombineInsertVecElts(MachineInstr &MI, SmallVectorImpl< Register > &MatchInfo) const
bool matchCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
void applyCombineUnmergeMergeToPlainValues(MachineInstr &MI, SmallVectorImpl< Register > &Operands) const
bool matchAshrShlToSextInreg(MachineInstr &MI, std::tuple< Register, int64_t > &MatchInfo) const
Match ashr (shl x, C), C -> sext_inreg (C)
void applyCombineUnmergeZExtToZExt(MachineInstr &MI) const
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:64
Represents a G_FCMP.
A floating-point-like constant.
Definition Utils.h:676
Represents a G_ICMP.
An integer-like constant.
Definition Utils.h:637
Abstract class that contains various methods for clients to notify about changes.
Represents any type of generic load or store.
Represents a logical binary operation.
Represents a G_PTR_ADD.
Represents a G_SELECT.
Represents a G_ZEXTLOAD.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
Helper class to build MachineInstr.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Holds all the information related to register banks.
This class implements the register bank concept.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetInstrInfo - Interface to description of machine instruction set.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
@ C
The default LLVM calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
std::function< void(MachineIRBuilder &)> BuildFnTy
SmallVector< std::function< void(MachineInstrBuilder &)>, 4 > OperandBuildSteps
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
@ Other
Any other memory.
Definition ModRef.h:68
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
InstructionBuildSteps()=default
Operands to be added to the instruction.
OperandBuildSteps OperandFns
The opcode for the produced instruction.
InstructionStepsMatchInfo(std::initializer_list< InstructionBuildSteps > InstrsToBuild)
SmallVector< InstructionBuildSteps, 2 > InstrsToBuild
Describes instructions to be built during a combine.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
const RegisterBank * Bank