CombinerHelper.h
1//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===--------------------------------------------------------------------===//
8/// \file
9/// This contains common combine transformations that may be used in a combine
10/// pass, or by the target elsewhere.
11/// Targets can pick individual opcode transformations from the helper or use
12/// tryCombine, which invokes all transformations. All of the transformations
13/// return true if the MachineInstr changed and false otherwise.
14///
15//===--------------------------------------------------------------------===//
16
17#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
18#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
19
20#include "llvm/ADT/DenseMap.h"
25#include "llvm/IR/InstrTypes.h"
26#include <functional>
27
28namespace llvm {
29
30class GISelChangeObserver;
31class APInt;
32class ConstantFP;
33class GPtrAdd;
34class GZExtLoad;
35class MachineIRBuilder;
36class MachineInstrBuilder;
37class MachineRegisterInfo;
38class MachineInstr;
39class MachineOperand;
40class GISelKnownBits;
41class MachineDominatorTree;
42class LegalizerInfo;
43struct LegalityQuery;
44class RegisterBank;
45class RegisterBankInfo;
46class TargetLowering;
47class TargetRegisterInfo;
48
49struct PreferredTuple {
50 LLT Ty; // The result type of the extend.
51 unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
52 MachineInstr *MI;
53};
54
55struct IndexedLoadStoreMatchInfo {
56 Register Addr;
57 Register Base;
58 Register Offset;
59 bool RematOffset = false; // True if Offset is a constant that needs to be
60 // rematerialized before the new load/store.
61 bool IsPre = false;
62};
63
64struct PtrAddChain {
65 int64_t Imm;
66 Register Base;
67 const RegisterBank *Bank;
68};
69
70struct RegisterImmPair {
71 Register Reg;
72 int64_t Imm;
73};
74
75struct ShiftOfShiftedLogic {
76 MachineInstr *Logic;
77 MachineInstr *Shift2;
78 Register LogicNonShiftReg;
79 uint64_t ValSum;
80};
81
82using BuildFnTy = std::function<void(MachineIRBuilder &)>;
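// Illustrative sketch only (not part of the upstream header): a typical
// match/apply pair records its rewrite in a BuildFnTy and lets applyBuildFn()
// replay it. The combine, function name, and registers below are hypothetical.
//
//   bool matchSomeCombine(MachineInstr &MI, BuildFnTy &MatchInfo) {
//     Register Dst = MI.getOperand(0).getReg();
//     Register Src = MI.getOperand(1).getReg();
//     // ... check that the rewrite is valid ...
//     MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, Src); };
//     return true;
//   }
//   // applyBuildFn(MI, MatchInfo) then runs the lambda and erases MI.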
83
84using OperandBuildSteps =
85 SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
86struct InstructionBuildSteps {
87 unsigned Opcode = 0; /// The opcode for the produced instruction.
88 OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
89 InstructionBuildSteps() = default;
90 InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
91 : Opcode(Opcode), OperandFns(OperandFns) {}
92};
93
94struct InstructionStepsMatchInfo {
95 /// Describes instructions to be built during a combine.
96 SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
97 InstructionStepsMatchInfo() = default;
98 InstructionStepsMatchInfo(
99 std::initializer_list<InstructionBuildSteps> InstrsToBuild)
100 : InstrsToBuild(InstrsToBuild) {}
101};
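// Illustrative sketch only (assumed usage, not upstream code): a combine that
// wants applyBuildInstructionSteps() to emit "G_AND Dst, X, Y" could fill the
// match info as below; Dst, X and Y are hypothetical registers.
//
//   // Inside a match function taking an InstructionStepsMatchInfo &MatchInfo:
//   OperandBuildSteps Ops;
//   Ops.push_back([=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); });
//   Ops.push_back([=](MachineInstrBuilder &MIB) { MIB.addReg(X); });
//   Ops.push_back([=](MachineInstrBuilder &MIB) { MIB.addReg(Y); });
//   MatchInfo.InstrsToBuild.emplace_back(TargetOpcode::G_AND, Ops);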
102
103class CombinerHelper {
104protected:
105 MachineIRBuilder &Builder;
106 MachineRegisterInfo &MRI;
107 GISelChangeObserver &Observer;
108 GISelKnownBits *KB;
109 MachineDominatorTree *MDT;
110 bool IsPreLegalize;
111 const LegalizerInfo *LI;
112 const RegisterBankInfo *RBI;
113 const TargetRegisterInfo *TRI;
114
115public:
116 CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
117 bool IsPreLegalize,
118 GISelKnownBits *KB = nullptr,
119 MachineDominatorTree *MDT = nullptr,
120 const LegalizerInfo *LI = nullptr);
121
122 GISelKnownBits *getKnownBits() const {
123 return KB;
124 }
125
126 MachineIRBuilder &getBuilder() const {
127 return Builder;
128 }
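// Illustrative sketch only (assumed usage): a target combiner typically
// constructs one helper per function being combined and then dispatches
// individual transforms on candidate instructions.
//
//   CombinerHelper Helper(Observer, B, /*IsPreLegalize=*/true, &KB, MDT, LI);
//   if (Helper.tryCombineCopy(MI))
//     return true; // MI was rewritten; the observer has been notified.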
129
130 const TargetLowering &getTargetLowering() const;
131
132 const MachineFunction &getMachineFunction() const;
133
134 const DataLayout &getDataLayout() const;
135
136 LLVMContext &getContext() const;
137
138 /// \returns true if the combiner is running pre-legalization.
139 bool isPreLegalize() const;
140
141 /// \returns true if \p Query is legal on the target.
142 bool isLegal(const LegalityQuery &Query) const;
143
144 /// \return true if the combine is running prior to legalization, or if \p
145 /// Query is legal on the target.
146 bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
147
148 /// \return true if the combine is running prior to legalization, or if \p Ty
149 /// is a legal integer constant type on the target.
150 bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;
151
152 /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
153 void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
154
155 /// Replace a single register operand with a new register and inform the
156 /// observer of the changes.
158 Register ToReg) const;
159
160 /// Replace the opcode of the instruction with a new opcode and inform the
161 /// observer of the changes.
162 void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;
163
164 /// Get the register bank of \p Reg.
165 /// If Reg has not been assigned a register, a register class,
166 /// or a register bank, then this returns nullptr.
167 ///
168 /// \pre Reg.isValid()
169 const RegisterBank *getRegBank(Register Reg) const;
170
171 /// Set the register bank of \p Reg.
172 /// Does nothing if the RegBank is null.
173 /// This is the counterpart to getRegBank.
174 void setRegBank(Register Reg, const RegisterBank *RegBank);
175
176 /// If \p MI is a COPY, try to combine it.
177 /// Returns true if MI changed.
181
182 /// Returns true if \p DefMI precedes \p UseMI or they are the same
183 /// instruction. Both must be in the same basic block.
185
186 /// Returns true if \p DefMI dominates \p UseMI. By definition an
187 /// instruction dominates itself.
188 ///
189 /// If we haven't been provided with a MachineDominatorTree during
190 /// construction, this function returns a conservative result that tracks just
191 /// a single basic block.
192 bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);
193
194 /// If \p MI is an extend that consumes the result of a load, try to combine it.
195 /// Returns true if MI changed.
199
200 /// Match (and (load x), mask) -> zextload x
202
203 /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
204 /// load.
206
209
212
213 /// Match sext_inreg(load p), imm -> sextload p
214 bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
215 void applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
216
217 /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
218 /// when their source operands are identical.
221
222 /// If a brcond's true block is not the fallthrough, make it so by inverting
223 /// the condition and swapping operands.
226
227 /// If \p MI is G_CONCAT_VECTORS, try to combine it.
228 /// Returns true if MI changed.
229 /// Right now, we support:
230 /// - concat_vector(undef, undef) => undef
231 /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
232 /// build_vector(A, B, C, D)
233 /// ==========================================================
234 /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
235 /// can be flattened into a build_vector.
236 /// In the first case \p Ops will be empty
237 /// In the second case \p Ops will contain the operands
238 /// needed to produce the flattened build_vector.
239 ///
240 /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
242 /// Replace \p MI with a flattened build_vector with \p Ops
243 /// or an implicit_def if \p Ops is empty.
245
247 /// Replace \p MI with a flattened build_vector with \p Ops
248 /// or an implicit_def if \p Ops is empty.
250
251 /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
252 /// Returns true if MI changed.
253 ///
254 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
256 /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
257 /// concat_vectors.
258 /// \p Ops will contain the operands needed to produce the flattened
259 /// concat_vectors.
260 ///
261 /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
264 /// Replace \p MI with a concat_vectors with \p Ops.
266 const ArrayRef<Register> Ops);
269
270 /// Optimize memcpy intrinsics et al., e.g. constant-length calls.
271 /// \p MaxLen, if non-zero, specifies the max length of a mem libcall to inline.
272 ///
273 /// For example (pre-indexed):
274 ///
275 /// $addr = G_PTR_ADD $base, $offset
276 /// [...]
277 /// $val = G_LOAD $addr
278 /// [...]
279 /// $whatever = COPY $addr
280 ///
281 /// -->
282 ///
283 /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
284 /// [...]
285 /// $whatever = COPY $addr
286 ///
287 /// or (post-indexed):
288 ///
289 /// G_STORE $val, $base
290 /// [...]
291 /// $addr = G_PTR_ADD $base, $offset
292 /// [...]
293 /// $whatever = COPY $addr
294 ///
295 /// -->
296 ///
297 /// $addr = G_INDEXED_STORE $val, $base, $offset
298 /// [...]
299 /// $whatever = COPY $addr
300 bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
301
304
305 /// Fold (shift (shift base, x), y) -> (shift base (x+y))
308
309 /// If we have a shift-by-constant of a bitwise logic op that itself has a
310 /// shift-by-constant operand with identical opcode, we may be able to convert
311 /// that into 2 independent shifts followed by the logic op.
313 ShiftOfShiftedLogic &MatchInfo);
315 ShiftOfShiftedLogic &MatchInfo);
316
317 bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo);
318
319 /// Transform a multiply by a power-of-2 value to a left shift.
320 bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
321 void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
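// For example (worked instance of the transform above): G_MUL %x, 8 becomes
// G_SHL %x, 3, since 8 == 1 << 3 and the shift amount is log2 of the constant.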
322
323 // Transform a G_SHL with an extended source into a narrower shift if
324 // possible.
327 const RegisterImmPair &MatchData);
328
329 /// Fold away a merge of an unmerge of the corresponding values.
331
332 /// Reduce a shift by a constant to an unmerge and a shift on a half sized
333 /// type. This will not produce a shift smaller than \p TargetShiftSize.
334 bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
335 unsigned &ShiftVal);
336 void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
337 bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
338
339 /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
340 bool
343 void
346
347 /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
352
353 /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
354 bool
356 std::function<void(MachineIRBuilder &)> &MatchInfo);
357
358 /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
361
362 /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
365
366 /// Transform fp_instr(cst) to constant result of the fp operation.
368
369 /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
372
373 /// Transform PtrToInt(IntToPtr(x)) to x.
375
376 /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
377 /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
379 std::pair<Register, bool> &PtrRegAndCommute);
381 std::pair<Register, bool> &PtrRegAndCommute);
382
383 // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
386
387 /// Transform anyext(trunc(x)) to x.
389
390 /// Transform zext(trunc(x)) to x.
392
393 /// Transform trunc (shl x, K) to shl (trunc x), K
394 /// if K < VT.getScalarSizeInBits().
395 ///
396 /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
397 /// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
398 /// MidVT is obtained by finding a legal type between the trunc's src and dst
399 /// types.
401 std::pair<MachineInstr *, LLT> &MatchInfo);
403 std::pair<MachineInstr *, LLT> &MatchInfo);
404
405 /// Return true if any explicit use operand on \p MI is defined by a
406 /// G_IMPLICIT_DEF.
408
409 /// Return true if all register explicit use operands on \p MI are defined by
410 /// a G_IMPLICIT_DEF.
412
413 /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
415
416 /// Return true if a G_STORE instruction \p MI is storing an undef value.
418
419 /// Return true if a G_SELECT instruction \p MI has an undef comparison.
421
422 /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
424
425 /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
426 /// true, \p OpIdx will store the operand index of the known selected value.
427 bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
428
429 /// Replace an instruction with a G_FCONSTANT with value \p C.
431
432 /// Replace an instruction with an G_FCONSTANT with value \p CFP.
434
435 /// Replace an instruction with a G_CONSTANT with value \p C.
437
438 /// Replace an instruction with a G_CONSTANT with value \p C.
440
441 /// Replace an instruction with a G_IMPLICIT_DEF.
443
444 /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
445 void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
446
447 /// Delete \p MI and replace all of its uses with \p Replacement.
449
450 /// @brief Replaces the shift amount in \p MI with ShiftAmt % BW
451 /// @param MI
453
454 /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
455 /// by equivalent instructions.
456 bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
457
458 /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value equal to
459 /// \p C.
460 bool matchConstantOp(const MachineOperand &MOP, int64_t C);
461
462 /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value exactly
463 /// equal to \p C.
464 bool matchConstantFPOp(const MachineOperand &MOP, double C);
465
466 /// @brief Checks if constant at \p ConstIdx is larger than \p MI 's bitwidth
467 /// @param ConstIdx Index of the constant
468 bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx);
469
470 /// Optimize (cond ? x : x) -> x
472
473 /// Optimize (x op x) -> x
475
476 /// Check if operand \p OpIdx is zero.
477 bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
478
479 /// Check if operand \p OpIdx is undef.
480 bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
481
482 /// Check if operand \p OpIdx is known to be a power of 2.
484
485 /// Erase \p MI
487
488 /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
490 std::tuple<Register, Register> &MatchInfo);
492 std::tuple<Register, Register> &MatchInfo);
493
494 /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
495 bool
497 InstructionStepsMatchInfo &MatchInfo);
498
499 /// Replace \p MI with a series of instructions described in \p MatchInfo.
501 InstructionStepsMatchInfo &MatchInfo);
502
503 /// Match ashr (shl x, C), C -> sext_inreg (C)
505 std::tuple<Register, int64_t> &MatchInfo);
507 std::tuple<Register, int64_t> &MatchInfo);
508
509 /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
511 BuildFnTy &MatchInfo);
512
513 /// \return true if \p MI is a G_AND instruction whose operands are x and y
514 /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
515 ///
516 /// \param [in] MI - The G_AND instruction.
517 /// \param [out] Replacement - A register the G_AND should be replaced with on
518 /// success.
519 bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
520
521 /// \return true if \p MI is a G_OR instruction whose operands are x and y
522 /// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
523 /// value.)
524 ///
525 /// \param [in] MI - The G_OR instruction.
526 /// \param [out] Replacement - A register the G_OR should be replaced with on
527 /// success.
528 bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
529
530 /// \return true if \p MI is a G_SEXT_INREG that can be erased.
532
533 /// Combine inverting a result of a compare into the opposite cond code.
536
537 /// Fold (xor (and x, y), y) -> (and (not x), y)
538 ///{
540 std::pair<Register, Register> &MatchInfo);
542 std::pair<Register, Register> &MatchInfo);
543 ///}
544
545 /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
548
549 /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
551
552 /// Push a binary operator through a select on constants.
553 ///
554 /// binop (select cond, K0, K1), K2 ->
555 /// select cond, (binop K0, K2), (binop K1, K2)
556 bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
557 void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);
558
560 SmallVectorImpl<Register> &MatchInfo);
561
563 SmallVectorImpl<Register> &MatchInfo);
564
565 /// Match expression trees of the form
566 ///
567 /// \code
568 /// sN *a = ...
569 /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
570 /// \endcode
571 ///
572 /// And check if the tree can be replaced with a M-bit load + possibly a
573 /// bswap.
574 bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo);
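// Worked example (assuming little-endian byte order): with N == 8 and M == 32,
// four adjacent byte loads OR'd together as above collapse into one 32-bit
// load; if the bytes appear in reversed order, a G_BSWAP is emitted as well.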
575
578
581
584 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
587 SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
588
589 /// Use a function which takes in a MachineIRBuilder to perform a combine.
590 /// By default, it erases the instruction \p MI from the function.
591 void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo);
592 /// Use a function which takes in a MachineIRBuilder to perform a combine.
593 /// This variant does not erase \p MI after calling the build function.
594 void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);
595
601
602 /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
603 /// or false constant based on KnownBits information.
604 bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);
605
606 /// \returns true if a G_ICMP \p MI can be replaced with its LHS based on
607 /// KnownBits information.
608 bool
610 BuildFnTy &MatchInfo);
611
612 /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
614
616 BuildFnTy &MatchInfo);
617 /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
619
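// Worked instance of the and/lshr form above: (and (lshr x, 8), 0xff) reads
// bits [15:8] of x and can be emitted as G_UBFX x, /*lsb=*/8, /*width=*/8.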
620 /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
622
623 /// Match: shr (and x, n), k -> ubfx x, pos, width
625
626 // Helpers for reassociation:
628 BuildFnTy &MatchInfo);
631 BuildFnTy &MatchInfo);
633 MachineInstr *RHS, BuildFnTy &MatchInfo);
634 /// Reassociate pointer calculations with G_ADD involved, to allow better
635 /// addressing mode usage.
636 bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo);
637
638 /// Try to reassociate the operands of a commutative binop.
639 bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
640 Register Op1, BuildFnTy &MatchInfo);
641 /// Reassociate commutative binary operations like G_ADD.
643
644 /// Do constant folding when opportunities are exposed after MIR building.
645 bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo);
646
647 /// Do constant folding when opportunities are exposed after MIR building.
648 bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo);
649
650 /// Do constant FP folding when opportunities are exposed after MIR building.
652
653 /// Constant fold G_FMA/G_FMAD.
655
656 /// \returns true if it is possible to narrow the width of a scalar binop
657 /// feeding a G_AND instruction \p MI.
659
660 /// Given a G_UDIV \p MI expressing a divide by a constant, return an
661 /// expression that implements it by multiplying by a magic number.
662 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
664 /// Combine G_UDIV by constant into a multiply by magic constant.
667
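// Worked example (standard magic-number arithmetic, independent of target):
// for a 32-bit unsigned divide by 5, x / 5 == (x * 0xCCCCCCCD) >> 34, i.e. a
// G_UMULH of x by 0xCCCCCCCD followed by a logical shift right by 2.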
668 /// Given a G_SDIV \p MI expressing a signed divide by a constant, return an
669 /// expression that implements it by multiplying by a magic number.
670 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
674
675 /// Given a G_SDIV \p MI expressing a signed division by a pow2 constant,
676 /// return expressions that implement it by shifting.
677 bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
679 /// Given a G_UDIV \p MI expressing an unsigned division by a pow2 constant,
680 /// return expressions that implement it by shifting.
682
683 // G_UMULH x, (1 << c) -> x >> (bitwidth - c)
686
687 /// Try to transform \p MI by using all of the above
688 /// combine functions. Returns true if changed.
690
691 /// Emit loads and stores that perform the given memcpy.
692 /// Assumes \p MI is a G_MEMCPY_INLINE
693 /// TODO: implement dynamically sized inline memcpy,
694 /// and rename: s/bool tryEmit/void emit/
696
697 /// Match:
698 /// (G_UMULO x, 2) -> (G_UADDO x, x)
699 /// (G_SMULO x, 2) -> (G_SADDO x, x)
700 bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);
701
702 /// Match:
703 /// (G_*MULO x, 0) -> 0 + no carry out
704 bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);
705
706 /// Match:
707 /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
708 /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
709 bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo);
710
711 /// Transform (fadd x, fneg(y)) -> (fsub x, y)
712 /// (fadd fneg(x), y) -> (fsub y, x)
713 /// (fsub x, fneg(y)) -> (fadd x, y)
714 /// (fmul fneg(x), fneg(y)) -> (fmul x, y)
715 /// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
716 /// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
717 /// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
719
720 bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo);
721 void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo);
722
723 bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
724 bool &HasFMAD, bool &Aggressive,
725 bool CanReassociate = false);
726
727 /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
728 /// (fadd (fmul x, y), z) -> (fmad x, y, z)
730
731 /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
732 /// (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
734 BuildFnTy &MatchInfo);
735
736 /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
737 /// (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
739 BuildFnTy &MatchInfo);
740
741 // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
742 // -> (fma x, y, (fma (fpext u), (fpext v), z))
743 // (fadd (fmad x, y, (fpext (fmul u, v))), z)
744 // -> (fmad x, y, (fmad (fpext u), (fpext v), z))
746 BuildFnTy &MatchInfo);
747
748 /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
749 /// (fsub (fmul x, y), z) -> (fmad x, y, -z)
751
752 /// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
753 /// (fsub (fneg (fmul, x, y)), z) -> (fmad (fneg x), y, (fneg z))
755 BuildFnTy &MatchInfo);
756
757 /// Transform (fsub (fpext (fmul x, y)), z)
758 /// -> (fma (fpext x), (fpext y), (fneg z))
759 /// (fsub (fpext (fmul x, y)), z)
760 /// -> (fmad (fpext x), (fpext y), (fneg z))
762 BuildFnTy &MatchInfo);
763
764 /// Transform (fsub (fpext (fneg (fmul x, y))), z)
765 /// -> (fneg (fma (fpext x), (fpext y), z))
766 /// (fsub (fpext (fneg (fmul x, y))), z)
767 /// -> (fneg (fmad (fpext x), (fpext y), z))
769 BuildFnTy &MatchInfo);
770
771 bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info);
772
773 /// Transform G_ADD(x, G_SUB(y, x)) to y.
774 /// Transform G_ADD(G_SUB(y, x), x) to y.
776
780
781 /// Transform:
782 /// (x + y) - y -> x
783 /// (x + y) - x -> y
784 /// x - (y + x) -> 0 - y
785 /// x - (x + z) -> 0 - z
786 bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo);
787
788 /// \returns true if it is possible to simplify a select instruction \p MI
789 /// to a min/max instruction of some sort.
791
792 /// Transform:
793 /// (X + Y) == X -> Y == 0
794 /// (X - Y) == X -> Y == 0
795 /// (X ^ Y) == X -> Y == 0
796 /// (X + Y) != X -> Y != 0
797 /// (X - Y) != X -> Y != 0
798 /// (X ^ Y) != X -> Y != 0
800
801 /// Match shifts greater than or equal to the bitwidth of the operation.
803
804 /// Match constant LHS ops that should be commuted.
806
807 /// Combine sext of trunc.
808 bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);
809
810 /// Combine zext of trunc.
811 bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);
812
813 /// Combine zext nneg to sext.
814 bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo);
815
816 /// Match constant LHS FP ops that should be commuted.
818
819 // Given a binop \p MI, commute operands 1 and 2.
821
822 /// Combine select to integer min/max.
823 bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo);
824
825 /// Combine selects.
826 bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo);
827
828 /// Combine ands.
829 bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo);
830
831 /// Combine ors.
832 bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo);
833
834 /// Combine addos.
835 bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo);
836
837 /// Combine extract vector element.
839
840 /// Combine extract vector element with a build vector on the vector register.
842 BuildFnTy &MatchInfo);
843
844 /// Combine extract vector element with a build vector trunc on the vector
845 /// register.
847 BuildFnTy &MatchInfo);
848
849 /// Combine extract vector element with a shuffle vector on the vector
850 /// register.
852 BuildFnTy &MatchInfo);
853
854 /// Combine extract vector element with an insert vector element on the vector
855 /// register and different indices.
857 BuildFnTy &MatchInfo);
858 /// Use a function which takes in a MachineIRBuilder to perform a combine.
859 /// By default, it erases the instruction def'd on \p MO from the function.
860 void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo);
861
862 /// Match FPOWI if it's safe to extend it into a series of multiplications.
864
865 /// Expands FPOWI into a series of multiplications and a division if the
866 /// exponent is negative.
868
869 /// Combine insert vector element OOB.
871
873 BuildFnTy &MatchInfo);
874
875 bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
876
877 bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
878
879 bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
880
881 bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
882
883 /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
884 bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
885 BuildFnTy &MatchInfo);
886
887 bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
888 BuildFnTy &MatchInfo);
889 bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
890
891 bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo);
892
893 bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
894
895 bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
896
897 // fold ((A-C1)+C2) -> (A+(C2-C1))
898 bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
899
900 bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI,
901 BuildFnTy &MatchInfo);
902
903 bool matchCastOfBuildVector(const MachineInstr &CastMI,
904 const MachineInstr &BVMI, BuildFnTy &MatchInfo);
905
906private:
907 /// Checks for legality of an indexed variant of \p LdSt.
908 bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
909 /// Given a non-indexed load or store instruction \p MI, find an offset that
910 /// can be usefully and legally folded into it as a post-indexing operation.
911 ///
912 /// \returns true if a candidate is found.
913 bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
914 Register &Offset, bool &RematOffset);
915
916 /// Given a non-indexed load or store instruction \p MI, find an offset that
917 /// can be usefully and legally folded into it as a pre-indexing operation.
918 ///
919 /// \returns true if a candidate is found.
920 bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
922
923 /// Helper function for matchLoadOrCombine. Searches for Registers
924 /// which may have been produced by a load instruction + some arithmetic.
925 ///
926 /// \param [in] Root - The search root.
927 ///
928 /// \returns The Registers found during the search.
929 std::optional<SmallVector<Register, 8>>
930 findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
931
932 /// Helper function for matchLoadOrCombine.
933 ///
934 /// Checks if every register in \p RegsToVisit is defined by a load
935 /// instruction + some arithmetic.
936 ///
937 /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
938 /// at to the index of the load.
939 /// \param [in] MemSizeInBits - The number of bits each load should produce.
940 ///
941 /// \returns On success, a 3-tuple containing lowest-index load found, the
942 /// lowest index, and the last load in the sequence.
943 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
944 findLoadOffsetsForLoadOrCombine(
946 const SmallVector<Register, 8> &RegsToVisit,
947 const unsigned MemSizeInBits);
948
949 /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
950 /// a re-association of its operands would break an existing legal addressing
951 /// mode that the address computation currently represents.
952 bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd);
953
954 /// Behavior when a floating point min/max is given one NaN and one
955 /// non-NaN as input.
956 enum class SelectPatternNaNBehaviour {
957 NOT_APPLICABLE = 0, /// NaN behavior not applicable.
958 RETURNS_NAN, /// Given one NaN input, returns the NaN.
959 RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
960 RETURNS_ANY /// Given one NaN input, can return either (or both operands are
961 /// known non-NaN.)
962 };
963
964 /// \returns which of \p LHS and \p RHS would be the result of a non-equality
965 /// floating point comparison where one of \p LHS and \p RHS may be NaN.
966 ///
967 /// If both \p LHS and \p RHS may be NaN, returns
968 /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
969 SelectPatternNaNBehaviour
970 computeRetValAgainstNaN(Register LHS, Register RHS,
971 bool IsOrderedComparison) const;
972
973 /// Determines the floating point min/max opcode which should be used for
974 /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
975 ///
976 /// \returns 0 if this G_SELECT should not be combined to a floating point
977 /// min or max. If it should be combined, returns one of
978 ///
979 /// * G_FMAXNUM
980 /// * G_FMAXIMUM
981 /// * G_FMINNUM
982 /// * G_FMINIMUM
983 ///
984 /// Helper function for matchFPSelectToMinMax.
985 unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
986 SelectPatternNaNBehaviour VsNaNRetVal) const;
987
988 /// Handle floating point cases for matchSimplifySelectToMinMax.
989 ///
990 /// E.g.
991 ///
992 /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
993 /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
994 bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
995 Register FalseVal, BuildFnTy &MatchInfo);
996
997 /// Try to fold selects to logical operations.
998 bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo);
999
1000 bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo);
1001
1002 bool isOneOrOneSplat(Register Src, bool AllowUndefs);
1003 bool isZeroOrZeroSplat(Register Src, bool AllowUndefs);
1004 bool isConstantSplatVector(Register Src, int64_t SplatValue,
1005 bool AllowUndefs);
1006 bool isConstantOrConstantVectorI(Register Src) const;
1007
1008 std::optional<APInt> getConstantOrConstantSplatVector(Register Src);
1009
1010 /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
1011 /// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
1012 /// into a single comparison using range-based reasoning.
1013 bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
1014 BuildFnTy &MatchInfo);
1015
1016 // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
1017 bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo);
1018
1019 bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;
1020};
1021} // namespace llvm
1022
1023#endif