LLVM 23.0.0git
TargetInstrInfo.h
Go to the documentation of this file.
1//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the target machine instruction set to the code generator.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
14#define LLVM_CODEGEN_TARGETINSTRINFO_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Uniformity.h"
30#include "llvm/MC/MCInstrInfo.h"
35#include <array>
36#include <cassert>
37#include <cstddef>
38#include <cstdint>
39#include <utility>
40#include <vector>
41
42namespace llvm {
43
44class DFAPacketizer;
46class LiveIntervals;
47class LiveVariables;
49class MachineLoop;
50class MachineLoopInfo;
54class MCAsmInfo;
55class MCInst;
56struct MCSchedModel;
57class Module;
58class ScheduleDAG;
59class ScheduleDAGMI;
61class SDNode;
62class SelectionDAG;
63class SMSchedule;
65class RegScavenger;
70enum class MachineTraceStrategy;
71
72template <class T> class SmallVectorImpl;
73
74using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;
75
79
81 : Destination(&Dest), Source(&Src) {}
82};
83
84/// Used to describe a register and immediate addition.
85struct RegImmPair {
87 int64_t Imm;
88
89 RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
90};
91
92/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
93/// It holds the register values, the scale value and the displacement.
94/// It also holds a descriptor for the expression used to calculate the address
95/// from the operands.
97 enum class Formula {
98 Basic = 0, // BaseReg + ScaledReg * Scale + Displacement
99 SExtScaledReg = 1, // BaseReg + sext(ScaledReg) * Scale + Displacement
100 ZExtScaledReg = 2 // BaseReg + zext(ScaledReg) * Scale + Displacement
101 };
102
105 int64_t Scale = 0;
106 int64_t Displacement = 0;
108 ExtAddrMode() = default;
109};
110
111//---------------------------------------------------------------------------
112///
113/// TargetInstrInfo - Interface to description of machine instruction set
114///
116protected:
118
119 /// Subtarget specific sub-array of MCInstrInfo's RegClassByHwModeTables
120 /// (i.e. the table for the active HwMode). This should be indexed by
121 /// MCOperandInfo's RegClass field for LookupRegClassByHwMode operands.
122 const int16_t *const RegClassByHwMode;
123
124 TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode = ~0u,
125 unsigned CFDestroyOpcode = ~0u, unsigned CatchRetOpcode = ~0u,
126 unsigned ReturnOpcode = ~0u,
127 const int16_t *const RegClassByHwModeTable = nullptr)
128 : TRI(TRI), RegClassByHwMode(RegClassByHwModeTable),
129 CallFrameSetupOpcode(CFSetupOpcode),
130 CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
131 ReturnOpcode(ReturnOpcode) {}
132
133public:
137
138 const TargetRegisterInfo &getRegisterInfo() const { return TRI; }
139
140 static bool isGenericOpcode(unsigned Opc) {
141 return Opc <= TargetOpcode::GENERIC_OP_END;
142 }
143
144 static bool isGenericAtomicRMWOpcode(unsigned Opc) {
145 return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
146 Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
147 }
148
149 /// \returns the subtarget appropriate RegClassID for \p OpInfo
150 ///
151 /// Note this shadows a version of getOpRegClassID in MCInstrInfo which takes
152 /// an additional argument for the subtarget's HwMode, since TargetInstrInfo
153 /// is owned by a subtarget in CodeGen but MCInstrInfo is a TargetMachine
154 /// constant.
155 int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const {
156 if (OpInfo.isLookupRegClassByHwMode())
157 return RegClassByHwMode[OpInfo.RegClass];
158 return OpInfo.RegClass;
159 }
160
161 /// Given a machine instruction descriptor, returns the register
162 /// class constraint for OpNum, or NULL.
163 virtual const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID,
164 unsigned OpNum) const;
165
166 /// Returns true if MI is an instruction we are unable to reason about
167 /// (like a call or something with unmodeled side effects).
168 virtual bool isGlobalMemoryObject(const MachineInstr *MI) const;
169
170 /// Return true if the instruction is trivially rematerializable, meaning it
171 /// has no side effects and requires no operands that aren't always available.
172 /// This means the only allowed uses are constants and unallocatable physical
173 /// registers so that the instructions result is independent of the place
174 /// in the function.
177 return false;
178 for (const MachineOperand &MO : MI.all_uses()) {
179 if (MO.getReg().isVirtual())
180 return false;
181 }
182 return true;
183 }
184
185 /// Return true if the instruction would be materializable at a point
186 /// in the containing function where all virtual register uses were
187 /// known to be live and available in registers.
188 bool isReMaterializable(const MachineInstr &MI) const {
189 return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF &&
190 MI.getNumOperands() == 1) ||
191 (MI.getDesc().isRematerializable() && isReMaterializableImpl(MI));
192 }
193
194 /// Given \p MO is a PhysReg use return if it can be ignored for the purpose
195 /// of instruction rematerialization or sinking.
196 virtual bool isIgnorableUse(const MachineOperand &MO) const {
197 return false;
198 }
199
200 virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo,
201 MachineCycleInfo *CI) const {
202 return true;
203 }
204
205 /// For a "cheap" instruction which doesn't enable additional sinking,
206 /// should MachineSink break a critical edge to sink it anyways?
208 return false;
209 }
210
211protected:
212 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
213 /// set, this hook lets the target specify whether the instruction is actually
214 /// rematerializable, taking into consideration its operands. This
215 /// predicate must return false if the instruction has any side effects other
216 /// than producing a value.
217 virtual bool isReMaterializableImpl(const MachineInstr &MI) const;
218
219 /// This method commutes the operands of the given machine instruction MI.
220 /// The operands to be commuted are specified by their indices OpIdx1 and
221 /// OpIdx2.
222 ///
223 /// If a target has any instructions that are commutable but require
224 /// converting to different instructions or making non-trivial changes
225 /// to commute them, this method can be overloaded to do that.
226 /// The default implementation simply swaps the commutable operands.
227 ///
228 /// If NewMI is false, MI is modified in place and returned; otherwise, a
229 /// new machine instruction is created and returned.
230 ///
231 /// Do not call this method for a non-commutable instruction.
232 /// Even though the instruction is commutable, the method may still
233 /// fail to commute the operands, null pointer is returned in such cases.
234 virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
235 unsigned OpIdx1,
236 unsigned OpIdx2) const;
237
238 /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
239 /// operand indices to (ResultIdx1, ResultIdx2).
240 /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
241 /// predefined to some indices or be undefined (designated by the special
242 /// value 'CommuteAnyOperandIndex').
243 /// The predefined result indices cannot be re-defined.
244 /// The function returns true iff after the result pair redefinition
245 /// the fixed result pair is equal to or equivalent to the source pair of
246 /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
247 /// the pairs (x,y) and (y,x) are equivalent.
248 static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
249 unsigned CommutableOpIdx1,
250 unsigned CommutableOpIdx2);
251
252public:
253 /// These methods return the opcode of the frame setup/destroy instructions
254 /// if they exist (-1 otherwise). Some targets use pseudo instructions in
255 /// order to abstract away the difference between operating with a frame
256 /// pointer and operating without, through the use of these two instructions.
257 /// A FrameSetup MI in MF implies MFI::AdjustsStack.
258 ///
259 unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
260 unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
261
262 /// Returns true if the argument is a frame pseudo instruction.
263 bool isFrameInstr(const MachineInstr &I) const {
264 return I.getOpcode() == getCallFrameSetupOpcode() ||
265 I.getOpcode() == getCallFrameDestroyOpcode();
266 }
267
268 /// Returns true if the argument is a frame setup pseudo instruction.
269 bool isFrameSetup(const MachineInstr &I) const {
270 return I.getOpcode() == getCallFrameSetupOpcode();
271 }
272
273 /// Returns size of the frame associated with the given frame instruction.
274 /// For frame setup instruction this is frame that is set up space set up
275 /// after the instruction. For frame destroy instruction this is the frame
276 /// freed by the caller.
277 /// Note, in some cases a call frame (or a part of it) may be prepared prior
278 /// to the frame setup instruction. It occurs in the calls that involve
279 /// inalloca arguments. This function reports only the size of the frame part
280 /// that is set up between the frame setup and destroy pseudo instructions.
281 int64_t getFrameSize(const MachineInstr &I) const {
282 assert(isFrameInstr(I) && "Not a frame instruction");
283 assert(I.getOperand(0).getImm() >= 0);
284 return I.getOperand(0).getImm();
285 }
286
287 /// Returns the total frame size, which is made up of the space set up inside
288 /// the pair of frame start-stop instructions and the space that is set up
289 /// prior to the pair.
290 int64_t getFrameTotalSize(const MachineInstr &I) const {
291 if (isFrameSetup(I)) {
292 assert(I.getOperand(1).getImm() >= 0 &&
293 "Frame size must not be negative");
294 return getFrameSize(I) + I.getOperand(1).getImm();
295 }
296 return getFrameSize(I);
297 }
298
299 unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
300 unsigned getReturnOpcode() const { return ReturnOpcode; }
301
302 /// Returns the actual stack pointer adjustment made by an instruction
303 /// as part of a call sequence. By default, only call frame setup/destroy
304 /// instructions adjust the stack, but targets may want to override this
305 /// to enable more fine-grained adjustment, or adjust by a different value.
306 virtual int getSPAdjust(const MachineInstr &MI) const;
307
308 /// Return true if the instruction is a "coalescable" extension instruction.
309 /// That is, it's like a copy where it's legal for the source to overlap the
310 /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
311 /// expected the pre-extension value is available as a subreg of the result
312 /// register. This also returns the sub-register index in SubIdx.
313 virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
314 Register &DstReg, unsigned &SubIdx) const {
315 return false;
316 }
317
318 /// If the specified machine instruction is a direct
319 /// load from a stack slot, return the virtual or physical register number of
320 /// the destination along with the FrameIndex of the loaded stack slot. If
321 /// not, return 0. This predicate must return 0 if the instruction has
322 /// any side effects other than loading from the stack slot.
324 int &FrameIndex) const {
325 return 0;
326 }
327
328 /// Optional extension of isLoadFromStackSlot that returns the number of
329 /// bytes loaded from the stack. This must be implemented if a backend
330 /// supports partial stack slot spills/loads to further disambiguate
331 /// what the load does.
333 int &FrameIndex,
334 TypeSize &MemBytes) const {
335 MemBytes = TypeSize::getZero();
336 return isLoadFromStackSlot(MI, FrameIndex);
337 }
338
339 /// Check for post-frame ptr elimination stack locations as well.
340 /// This uses a heuristic so it isn't reliable for correctness.
342 int &FrameIndex) const {
343 return 0;
344 }
345
346 /// If the specified machine instruction has a load from a stack slot,
347 /// return true along with the FrameIndices of the loaded stack slot and the
348 /// machine mem operands containing the reference.
349 /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
350 /// any instructions that loads from the stack. This is just a hint, as some
351 /// cases may be missed.
352 virtual bool hasLoadFromStackSlot(
353 const MachineInstr &MI,
355
356 /// If the specified machine instruction is a direct
357 /// store to a stack slot, return the virtual or physical register number of
358 /// the source reg along with the FrameIndex of the loaded stack slot. If
359 /// not, return 0. This predicate must return 0 if the instruction has
360 /// any side effects other than storing to the stack slot.
362 int &FrameIndex) const {
363 return 0;
364 }
365
366 /// Optional extension of isStoreToStackSlot that returns the number of
367 /// bytes stored to the stack. This must be implemented if a backend
368 /// supports partial stack slot spills/loads to further disambiguate
369 /// what the store does.
371 int &FrameIndex,
372 TypeSize &MemBytes) const {
373 MemBytes = TypeSize::getZero();
374 return isStoreToStackSlot(MI, FrameIndex);
375 }
376
377 /// Check for post-frame ptr elimination stack locations as well.
378 /// This uses a heuristic, so it isn't reliable for correctness.
380 int &FrameIndex) const {
381 return 0;
382 }
383
384 /// If the specified machine instruction has a store to a stack slot,
385 /// return true along with the FrameIndices of the loaded stack slot and the
386 /// machine mem operands containing the reference.
387 /// If not, return false. Unlike isStoreToStackSlot,
388 /// this returns true for any instructions that stores to the
389 /// stack. This is just a hint, as some cases may be missed.
390 virtual bool hasStoreToStackSlot(
391 const MachineInstr &MI,
393
394 /// Return true if the specified machine instruction
395 /// is a copy of one stack slot to another and has no other effect.
396 /// Provide the identity of the two frame indices.
397 virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
398 int &SrcFrameIndex) const {
399 return false;
400 }
401
402 /// Compute the size in bytes and offset within a stack slot of a spilled
403 /// register or subregister.
404 ///
405 /// \param [out] Size in bytes of the spilled value.
406 /// \param [out] Offset in bytes within the stack slot.
407 /// \returns true if both Size and Offset are successfully computed.
408 ///
409 /// Not all subregisters have computable spill slots. For example,
410 /// subregisters may not be byte-sized, and a pair of discontiguous
411 /// subregisters has no single offset.
412 ///
413 /// Targets with nontrivial bigendian implementations may need to override
414 /// this, particularly to support spilled vector registers.
415 virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
416 unsigned &Size, unsigned &Offset,
417 const MachineFunction &MF) const;
418
419 /// Return true if the given instruction is terminator that is unspillable,
420 /// according to isUnspillableTerminatorImpl.
422 return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
423 }
424
425 /// Sum the sizes of instructions inside of a BUNDLE, by calling \ref
426 /// getInstSizeInBytes on each. This is a utility function for implementations
427 /// of \ref getInstSizeInBytes to use.
428 unsigned getInstBundleSize(const MachineInstr &MI) const;
429
430 /// Returns the size in bytes of the specified MachineInstr, or ~0U
431 /// when this function is not implemented by a target.
432
433 /// For BUNDLE instructions, target implementations are responsible for
434 /// accounting for the size of all bundled instructions.
435 virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
436 return ~0U;
437 }
438
440 /// Do not verify instruction size.
442 /// Check that the instruction size matches exactly.
444 /// Allow the reported instruction size to be larger than the actual size.
446 };
447
448 /// Determine whether/how the instruction size returned by
449 /// getInstSizeInBytes() should be verified.
450 virtual InstSizeVerifyMode
454
455 /// Return true if the instruction is as cheap as a move instruction.
456 ///
457 /// Targets for different archs need to override this, and different
458 /// micro-architectures can also be finely tuned inside.
459 virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
460 return MI.isAsCheapAsAMove();
461 }
462
463 /// Return true if the instruction should be sunk by MachineSink.
464 ///
465 /// MachineSink determines on its own whether the instruction is safe to sink;
466 /// this gives the target a hook to override the default behavior with regards
467 /// to which instructions should be sunk.
468 ///
469 /// shouldPostRASink() is used by PostRAMachineSink.
470 virtual bool shouldSink(const MachineInstr &MI) const { return true; }
471 virtual bool shouldPostRASink(const MachineInstr &MI) const { return true; }
472
473 /// Return false if the instruction should not be hoisted by MachineLICM.
474 ///
475 /// MachineLICM determines on its own whether the instruction is safe to
476 /// hoist; this gives the target a hook to extend this assessment and prevent
477 /// an instruction being hoisted from a given loop for target specific
478 /// reasons.
479 virtual bool shouldHoist(const MachineInstr &MI,
480 const MachineLoop *FromLoop) const {
481 return true;
482 }
483
484 /// Re-issue the specified 'original' instruction at the
485 /// specific location targeting a new destination register.
486 /// The register in Orig->getOperand(0).getReg() will be substituted by
487 /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
488 /// SubIdx.
489 /// \p UsedLanes is a bitmask of the lanes that are live at the
490 /// rematerialization point.
491 virtual void
493 Register DestReg, unsigned SubIdx, const MachineInstr &Orig,
494 LaneBitmask UsedLanes = LaneBitmask::getAll()) const;
495
496 /// Clones instruction or the whole instruction bundle \p Orig and
497 /// insert into \p MBB before \p InsertBefore. The target may update operands
498 /// that are required to be unique.
499 ///
500 /// \p Orig must not return true for MachineInstr::isNotDuplicable().
501 virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
502 MachineBasicBlock::iterator InsertBefore,
503 const MachineInstr &Orig) const;
504
505 /// This method must be implemented by targets that
506 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
507 /// may be able to convert a two-address instruction into one or more true
508 /// three-address instructions on demand. This allows the X86 target (for
509 /// example) to convert ADD and SHL instructions into LEA instructions if they
510 /// would require register copies due to two-addressness.
511 ///
512 /// This method returns a null pointer if the transformation cannot be
513 /// performed, otherwise it returns the last new instruction.
514 ///
515 /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
516 /// replacing \p MI with new instructions, even though this function does not
517 /// remove MI.
519 LiveVariables *LV,
520 LiveIntervals *LIS) const {
521 return nullptr;
522 }
523
524 // This constant can be used as an input value of operand index passed to
525 // the method findCommutedOpIndices() to tell the method that the
526 // corresponding operand index is not pre-defined and that the method
527 // can pick any commutable operand.
528 static const unsigned CommuteAnyOperandIndex = ~0U;
529
530 /// This method commutes the operands of the given machine instruction MI.
531 ///
532 /// The operands to be commuted are specified by their indices OpIdx1 and
533 /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
534 /// 'CommuteAnyOperandIndex', which means that the method is free to choose
535 /// any arbitrarily chosen commutable operand. If both arguments are set to
536 /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
537 /// operands; then commutes them if such operands could be found.
538 ///
539 /// If NewMI is false, MI is modified in place and returned; otherwise, a
540 /// new machine instruction is created and returned.
541 ///
542 /// Do not call this method for a non-commutable instruction or
543 /// for non-commutable operands.
544 /// Even though the instruction is commutable, the method may still
545 /// fail to commute the operands, null pointer is returned in such cases.
547 commuteInstruction(MachineInstr &MI, bool NewMI = false,
548 unsigned OpIdx1 = CommuteAnyOperandIndex,
549 unsigned OpIdx2 = CommuteAnyOperandIndex) const;
550
551 /// Returns true iff the routine could find two commutable operands in the
552 /// given machine instruction.
553 /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
554 /// If any of the INPUT values is set to the special value
555 /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
556 /// operand, then returns its index in the corresponding argument.
557 /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
558 /// looks for 2 commutable operands.
559 /// If INPUT values refer to some operands of MI, then the method simply
560 /// returns true if the corresponding operands are commutable and returns
561 /// false otherwise.
562 ///
563 /// For example, calling this method this way:
564 /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
565 /// findCommutedOpIndices(MI, Op1, Op2);
566 /// can be interpreted as a query asking to find an operand that would be
567 /// commutable with the operand#1.
568 virtual bool findCommutedOpIndices(const MachineInstr &MI,
569 unsigned &SrcOpIdx1,
570 unsigned &SrcOpIdx2) const;
571
572 /// Returns true if the target has a preference on the operands order of
573 /// the given machine instruction. And specify if \p Commute is required to
574 /// get the desired operands order.
575 virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
576 return false;
577 }
578
579 /// If possible, converts the instruction to a simplified/canonical form.
580 /// Returns true if the instruction was modified.
581 ///
582 /// This function is only called after register allocation. The MI will be
583 /// modified in place. This is called by passes such as
584 /// MachineCopyPropagation, where their mutation of the MI operands may
585 /// expose opportunities to convert the instruction to a simpler form (e.g.
586 /// a load of 0).
587 virtual bool simplifyInstruction(MachineInstr &MI) const { return false; }
588
589 /// A pair composed of a register and a sub-register index.
590 /// Used to give some type checking when modeling Reg:SubReg.
593 unsigned SubReg;
594
596 : Reg(Reg), SubReg(SubReg) {}
597
598 bool operator==(const RegSubRegPair& P) const {
599 return Reg == P.Reg && SubReg == P.SubReg;
600 }
601 bool operator!=(const RegSubRegPair& P) const {
602 return !(*this == P);
603 }
604 };
605
606 /// A pair composed of a pair of a register and a sub-register index,
607 /// and another sub-register index.
608 /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
610 unsigned SubIdx;
611
613 unsigned SubIdx = 0)
615 };
616
617 /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
618 /// and \p DefIdx.
619 /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
620 /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
621 /// flag are not added to this list.
622 /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
623 /// two elements:
624 /// - %1:sub1, sub0
625 /// - %2<:0>, sub1
626 ///
627 /// \returns true if it is possible to build such an input sequence
628 /// with the pair \p MI, \p DefIdx. False otherwise.
629 ///
630 /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
631 ///
632 /// \note The generic implementation does not provide any support for
633 /// MI.isRegSequenceLike(). In other words, one has to override
634 /// getRegSequenceLikeInputs for target specific instructions.
635 bool
636 getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
637 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
638
639 /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
640 /// and \p DefIdx.
641 /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
642 /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
643 /// - %1:sub1, sub0
644 ///
645 /// \returns true if it is possible to build such an input sequence
646 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
647 /// False otherwise.
648 ///
649 /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
650 ///
651 /// \note The generic implementation does not provide any support for
652 /// MI.isExtractSubregLike(). In other words, one has to override
653 /// getExtractSubregLikeInputs for target specific instructions.
654 bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
655 RegSubRegPairAndIdx &InputReg) const;
656
657 /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
658 /// and \p DefIdx.
659 /// \p [out] BaseReg and \p [out] InsertedReg contain
660 /// the equivalent inputs of INSERT_SUBREG.
661 /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
662 /// - BaseReg: %0:sub0
663 /// - InsertedReg: %1:sub1, sub3
664 ///
665 /// \returns true if it is possible to build such an input sequence
666 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
667 /// False otherwise.
668 ///
669 /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
670 ///
671 /// \note The generic implementation does not provide any support for
672 /// MI.isInsertSubregLike(). In other words, one has to override
673 /// getInsertSubregLikeInputs for target specific instructions.
674 bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
675 RegSubRegPair &BaseReg,
676 RegSubRegPairAndIdx &InsertedReg) const;
677
678 /// Return true if two machine instructions would produce identical values.
679 /// By default, this is only true when the two instructions
680 /// are deemed identical except for defs. If this function is called when the
681 /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
682 /// aggressive checks.
683 virtual bool produceSameValue(const MachineInstr &MI0,
684 const MachineInstr &MI1,
685 const MachineRegisterInfo *MRI = nullptr) const;
686
687 /// \returns true if a branch from an instruction with opcode \p BranchOpc
688 /// bytes is capable of jumping to a position \p BrOffset bytes away.
689 virtual bool isBranchOffsetInRange(unsigned BranchOpc,
690 int64_t BrOffset) const {
691 llvm_unreachable("target did not implement");
692 }
693
694 /// \returns The block that branch instruction \p MI jumps to.
696 llvm_unreachable("target did not implement");
697 }
698
699 /// Insert an unconditional indirect branch at the end of \p MBB to \p
700 /// NewDestBB. Optionally, insert the clobbered register restoring in \p
701 /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
702 /// the offset of the position to insert the new branch.
704 MachineBasicBlock &NewDestBB,
705 MachineBasicBlock &RestoreBB,
706 const DebugLoc &DL, int64_t BrOffset = 0,
707 RegScavenger *RS = nullptr) const {
708 llvm_unreachable("target did not implement");
709 }
710
711 /// Analyze the branching code at the end of MBB, returning
712 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
713 /// implemented for a target). Upon success, this returns false and returns
714 /// with the following information in various cases:
715 ///
716 /// 1. If this block ends with no branches (it just falls through to its succ)
717 /// just return false, leaving TBB/FBB null.
718 /// 2. If this block ends with only an unconditional branch, it sets TBB to be
719 /// the destination block.
720 /// 3. If this block ends with a conditional branch and it falls through to a
721 /// successor block, it sets TBB to be the branch destination block and a
722 /// list of operands that evaluate the condition. These operands can be
723 /// passed to other TargetInstrInfo methods to create new branches.
724 /// 4. If this block ends with a conditional branch followed by an
725 /// unconditional branch, it returns the 'true' destination in TBB, the
726 /// 'false' destination in FBB, and a list of operands that evaluate the
727 /// condition. These operands can be passed to other TargetInstrInfo
728 /// methods to create new branches.
729 ///
730 /// Note that removeBranch and insertBranch must be implemented to support
731 /// cases where this method returns success.
732 ///
733 /// If AllowModify is true, then this routine is allowed to modify the basic
734 /// block (e.g. delete instructions after the unconditional branch).
735 ///
736 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
737 /// before calling this function.
739 MachineBasicBlock *&FBB,
741 bool AllowModify = false) const {
742 return true;
743 }
744
745 /// Represents a predicate at the MachineFunction level. The control flow a
746 /// MachineBranchPredicate represents is:
747 ///
748 /// Reg = LHS `Predicate` RHS == ConditionDef
749 /// if Reg then goto TrueDest else goto FalseDest
750 ///
753 PRED_EQ, // True if two values are equal
754 PRED_NE, // True if two values are not equal
755 PRED_INVALID // Sentinel value
756 };
757
764
765 /// SingleUseCondition is true if ConditionDef is dead except for the
766 /// branch(es) at the end of the basic block.
767 ///
768 bool SingleUseCondition = false;
769
770 explicit MachineBranchPredicate() = default;
771 };
772
773 /// Analyze the branching code at the end of MBB and parse it into the
774 /// MachineBranchPredicate structure if possible. Returns false on success
775 /// and true on failure.
776 ///
777 /// If AllowModify is true, then this routine is allowed to modify the basic
778 /// block (e.g. delete instructions after the unconditional branch).
779 ///
782 bool AllowModify = false) const {
783 return true;
784 }
785
786 /// Remove the branching code at the end of the specific MBB.
787 /// This is only invoked in cases where analyzeBranch returns success. It
788 /// returns the number of instructions that were removed.
789 /// If \p BytesRemoved is non-null, report the change in code size from the
790 /// removed instructions.
792 int *BytesRemoved = nullptr) const {
793 llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
794 }
795
796 /// Insert branch code into the end of the specified MachineBasicBlock. The
797 /// operands to this method are the same as those returned by analyzeBranch.
798 /// This is only invoked in cases where analyzeBranch returns success. It
799 /// returns the number of instructions inserted. If \p BytesAdded is non-null,
800 /// report the change in code size from the added instructions.
801 ///
802 /// It is also invoked by tail merging to add unconditional branches in
803 /// cases where analyzeBranch doesn't apply because there was no original
804 /// branch to analyze. At least this much must be implemented, else tail
805 /// merging needs to be disabled.
806 ///
807 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
808 /// before calling this function.
812 const DebugLoc &DL,
813 int *BytesAdded = nullptr) const {
814 llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
815 }
816
818 MachineBasicBlock *DestBB,
819 const DebugLoc &DL,
820 int *BytesAdded = nullptr) const {
821 return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
822 BytesAdded);
823 }
824
825 /// Object returned by analyzeLoopForPipelining. Allows software pipelining
826 /// implementations to query attributes of the loop being pipelined and to
827 /// apply target-specific updates to the loop once pipelining is complete.
829 public:
831 /// Return true if the given instruction should not be pipelined and should
832 /// be ignored. An example could be a loop comparison, or induction variable
833 /// update with no users being pipelined.
834 virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
835
    /// Return true if the proposed schedule should be used. Otherwise return
837 /// false to not pipeline the loop. This function should be used to ensure
838 /// that pipelined loops meet target-specific quality heuristics.
840 return true;
841 }
842
843 /// Create a condition to determine if the trip count of the loop is greater
844 /// than TC, where TC is always one more than for the previous prologue or
845 /// 0 if this is being called for the outermost prologue.
846 ///
847 /// If the trip count is statically known to be greater than TC, return
848 /// true. If the trip count is statically known to be not greater than TC,
849 /// return false. Otherwise return nullopt and fill out Cond with the test
850 /// condition.
851 ///
852 /// Note: This hook is guaranteed to be called from the innermost to the
853 /// outermost prologue of the loop being software pipelined.
854 virtual std::optional<bool>
857
858 /// Create a condition to determine if the remaining trip count for a phase
859 /// is greater than TC. Some instructions such as comparisons may be
860 /// inserted at the bottom of MBB. All instructions expanded for the
861 /// phase must be inserted in MBB before calling this function.
862 /// LastStage0Insts is the map from the original instructions scheduled at
863 /// stage#0 to the expanded instructions for the last iteration of the
864 /// kernel. LastStage0Insts is intended to obtain the instruction that
865 /// refers the latest loop counter value.
866 ///
867 /// MBB can also be a predecessor of the prologue block. Then
868 /// LastStage0Insts must be empty and the compared value is the initial
869 /// value of the trip count.
874 "Target didn't implement "
875 "PipelinerLoopInfo::createRemainingIterationsGreaterCondition!");
876 }
877
878 /// Modify the loop such that the trip count is
879 /// OriginalTC + TripCountAdjust.
880 virtual void adjustTripCount(int TripCountAdjust) = 0;
881
882 /// Called when the loop's preheader has been modified to NewPreheader.
883 virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
884
    /// Called when the loop is being removed. Any instructions in the preheader
    /// should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed. The default implementation has
    /// nothing to clean up; targets override it to erase preheader setup code.
    virtual void disposed(LiveIntervals *LIS = nullptr) {}
891
    /// Return true if the target can expand a pipelined schedule with modulo
    /// variable expansion. Conservative default: targets must opt in.
    virtual bool isMVEExpanderSupported() { return false; }
895 };
896
897 /// Analyze loop L, which must be a single-basic-block loop, and if the
898 /// conditions can be understood enough produce a PipelinerLoopInfo object.
899 virtual std::unique_ptr<PipelinerLoopInfo>
901 return nullptr;
902 }
903
  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    // Conservative default: report the loop as not analyzable.
    return true;
  }
911
912 /// Generate code to reduce the loop iteration by one and check if the loop
913 /// is finished. Return the value/register of the new loop count. We need
914 /// this function when peeling off one or more iterations of a loop. This
915 /// function assumes the nth iteration is peeled first.
917 MachineBasicBlock &PreHeader,
918 MachineInstr *IndVar, MachineInstr &Cmp,
921 unsigned Iter, unsigned MaxIter) const {
922 llvm_unreachable("Target didn't implement ReduceLoopCount");
923 }
924
925 /// Delete the instruction OldInst and everything after it, replacing it with
926 /// an unconditional branch to NewDest. This is used by the tail merging pass.
927 virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
928 MachineBasicBlock *NewDest) const;
929
930 /// Return true if it's legal to split the given basic
931 /// block at the specified instruction (i.e. instruction would be the start
932 /// of a new basic block).
935 return true;
936 }
937
  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
    // Conservative default: a target must opt in to if-conversion.
    return false;
  }
948
  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from true and false path
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    // Conservative default: a target must opt in to diamond if-conversion.
    return false;
  }
962
963 /// Return true if it's profitable for if-converter to duplicate instructions
964 /// of specified accumulated instruction latencies in the specified MBB to
965 /// enable if-conversion.
966 /// The probability of the instructions being executed is given by
967 /// Probability, and Confidence is a measure of our confidence that it
968 /// will be properly predicted.
970 unsigned NumCycles,
971 BranchProbability Probability) const {
972 return false;
973 }
974
975 /// Return the increase in code size needed to predicate a contiguous run of
976 /// NumInsts instructions.
978 unsigned NumInsts) const {
979 return 0;
980 }
981
  /// Return an estimate for the code size reduction (in bytes) which will be
  /// caused by removing the given branch instruction during if-conversion.
  virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
    // Default estimate: the full encoded size of the branch instruction.
    return getInstSizeInBytes(MI);
  }
987
988 /// Return true if it's profitable to unpredicate
989 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
990 /// exclusive predicates.
991 /// e.g.
992 /// subeq r0, r1, #1
993 /// addne r0, r1, #1
994 /// =>
995 /// sub r0, r1, #1
996 /// addne r0, r1, #1
997 ///
  /// This may be profitable if conditional instructions are always executed.
1000 MachineBasicBlock &FMBB) const {
1001 return false;
1002 }
1003
1004 /// Return true if it is possible to insert a select
1005 /// instruction that chooses between TrueReg and FalseReg based on the
1006 /// condition code in Cond.
1007 ///
1008 /// When successful, also return the latency in cycles from TrueReg,
1009 /// FalseReg, and Cond to the destination register. In most cases, a select
1010 /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
1011 ///
1012 /// Some x86 implementations have 2-cycle cmov instructions.
1013 ///
1014 /// @param MBB Block where select instruction would be inserted.
1015 /// @param Cond Condition returned by analyzeBranch.
1016 /// @param DstReg Virtual dest register that the result should write to.
1017 /// @param TrueReg Virtual register to select when Cond is true.
1018 /// @param FalseReg Virtual register to select when Cond is false.
1019 /// @param CondCycles Latency from Cond+Branch to select output.
1020 /// @param TrueCycles Latency from TrueReg to select output.
1021 /// @param FalseCycles Latency from FalseReg to select output.
1024 Register TrueReg, Register FalseReg,
1025 int &CondCycles, int &TrueCycles,
1026 int &FalseCycles) const {
1027 return false;
1028 }
1029
1030 /// Insert a select instruction into MBB before I that will copy TrueReg to
1031 /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
1032 ///
1033 /// This function can only be called after canInsertSelect() returned true.
1034 /// The condition in Cond comes from analyzeBranch, and it can be assumed
1035 /// that the same flags or registers required by Cond are available at the
1036 /// insertion point.
1037 ///
1038 /// @param MBB Block where select instruction should be inserted.
1039 /// @param I Insertion point.
1040 /// @param DL Source location for debugging.
1041 /// @param DstReg Virtual register to be defined by select instruction.
1042 /// @param Cond Condition as computed by analyzeBranch.
1043 /// @param TrueReg Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
1048 Register TrueReg, Register FalseReg) const {
1049 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
1050 }
1051
1052 /// Given an instruction marked as `isSelect = true`, attempt to optimize MI
1053 /// by merging it with one of its operands. Returns nullptr on failure.
1054 ///
1055 /// When successful, returns the new select instruction. The client is
1056 /// responsible for deleting MI.
1057 ///
1058 /// If both sides of the select can be optimized, PreferFalse is used to pick
1059 /// a side.
1060 ///
1061 /// @param MI Optimizable select instruction.
1062 /// @param NewMIs Set that record all MIs in the basic block up to \p
1063 /// MI. Has to be updated with any newly created MI or deleted ones.
1064 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
1065 /// @returns Optimized instruction or NULL.
1068 bool PreferFalse = false) const {
1069 assert(MI.isSelect() && "MI must be a select instruction");
1070 return nullptr;
1071 }
1072
1073 /// Emit instructions to copy a pair of physical registers.
1074 ///
1075 /// This function should support copies within any legal register class as
1076 /// well as any cross-class copies created during instruction selection.
1077 ///
1078 /// The source and destination registers may overlap, which may require a
1079 /// careful implementation when multiple copy instructions are required for
1080 /// large registers. See for example the ARM target.
1081 ///
1082 /// If RenamableDest is true, the copy instruction's destination operand is
1083 /// marked renamable.
1084 /// If RenamableSrc is true, the copy instruction's source operand is
1085 /// marked renamable.
1088 Register DestReg, Register SrcReg, bool KillSrc,
1089 bool RenamableDest = false,
1090 bool RenamableSrc = false) const {
1091 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
1092 }
1093
  /// Allow targets to tell MachineVerifier whether a specific register
  /// MachineOperand can be used as part of PC-relative addressing.
  /// PC-relative addressing modes in many CISC architectures contain
  /// (non-PC) registers as offsets or scaling values, which inherently
  /// tags the corresponding MachineOperand with OPERAND_PCREL.
  ///
  /// @param MO The MachineOperand in question. MO.isReg() should always
  /// be true.
  /// @return Whether this operand is allowed to be used PC-relatively.
  virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
    // Conservative default: reject; PC-relative register operands are rare.
    return false;
  }
1106
  /// Return an index for MachineJumpTableInfo if \p MI is an indirect jump
  /// using a jump table, otherwise -1.
  virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }
1110
1111protected:
1112 /// Target-dependent implementation for IsCopyInstr.
  /// If the specific machine instruction is an instruction that moves/copies
1114 /// value from one register to another register return destination and source
1115 /// registers as machine operands.
1116 virtual std::optional<DestSourcePair>
1118 return std::nullopt;
1119 }
1120
1121 virtual std::optional<DestSourcePair>
1123 return std::nullopt;
1124 }
1125
  /// Return true if the given terminator MI is not expected to spill. This
  /// sets the live interval as not spillable and adjusts phi node lowering to
  /// not introduce copies after the terminator. Use with care, these are
  /// currently used for hardware loop intrinsics in very controlled situations,
  /// created prior to register allocation in loops that only have single phi
  /// users for the terminators value. They may run out of registers if not used
  /// carefully.
  virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
    // Default: no terminator gets the unspillable treatment.
    return false;
  }
1136
1137public:
1138 /// If the specific machine instruction is a instruction that moves/copies
1139 /// value from one register to another register return destination and source
1140 /// registers as machine operands.
1141 /// For COPY-instruction the method naturally returns destination and source
1142 /// registers as machine operands, for all other instructions the method calls
1143 /// target-dependent implementation.
1144 std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
1145 if (MI.isCopy()) {
1146 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1147 }
1148 return isCopyInstrImpl(MI);
1149 }
1150
1151 // Similar to `isCopyInstr`, but adds non-copy semantics on MIR, but
1152 // ultimately generates a copy instruction.
1153 std::optional<DestSourcePair> isCopyLikeInstr(const MachineInstr &MI) const {
1154 if (auto IsCopyInstr = isCopyInstr(MI))
1155 return IsCopyInstr;
1156 return isCopyLikeInstrImpl(MI);
1157 }
1158
1159 bool isFullCopyInstr(const MachineInstr &MI) const {
1160 auto DestSrc = isCopyInstr(MI);
1161 if (!DestSrc)
1162 return false;
1163
1164 const MachineOperand *DestRegOp = DestSrc->Destination;
1165 const MachineOperand *SrcRegOp = DestSrc->Source;
1166 return !DestRegOp->getSubReg() && !SrcRegOp->getSubReg();
1167 }
1168
  /// If the specific machine instruction is an instruction that adds an
  /// immediate value and a register, and stores the result in the given
  /// register \c Reg, return a pair of the source register and the offset
  /// which has been added.
  virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                                   Register Reg) const {
    // Default: target does not recognize any add-immediate forms.
    return std::nullopt;
  }
1177
1178 /// Returns true if MI is an instruction that defines Reg to have a constant
1179 /// value and the value is recorded in ImmVal. The ImmVal is a result that
1180 /// should be interpreted as modulo size of Reg.
1182 const Register Reg,
1183 int64_t &ImmVal) const {
1184 return false;
1185 }
1186
1187 /// Store the specified register of the given register class to the specified
1188 /// stack frame index. The store instruction is to be added to the given
1189 /// machine basic block before the specified machine instruction. If isKill
1190 /// is true, the register operand is the last use and must be marked kill. If
1191 /// \p SrcReg is being directly spilled as part of assigning a virtual
1192 /// register, \p VReg is the register being assigned. This additional register
1193 /// argument is needed for certain targets when invoked from RegAllocFast to
1194 /// map the spilled physical register to its virtual register. A null register
1195 /// can be passed elsewhere. The \p Flags is used to set appropriate machine
1196 /// flags on the spill instruction e.g. FrameSetup flag on a callee saved
1197 /// register spill instruction, part of prologue, during the frame lowering.
1200 bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
1202 llvm_unreachable("Target didn't implement "
1203 "TargetInstrInfo::storeRegToStackSlot!");
1204 }
1205
1206 /// Load the specified register of the given register class from the specified
1207 /// stack frame index. The load instruction is to be added to the given
1208 /// machine basic block before the specified machine instruction. If \p
1209 /// DestReg is being directly reloaded as part of assigning a virtual
1210 /// register, \p VReg is the register being assigned. This additional register
1211 /// argument is needed for certain targets when invoked from RegAllocFast to
1212 /// map the loaded physical register to its virtual register. A null register
1213 /// can be passed elsewhere. \p SubReg is required for partial reload of
1214 /// tuples if the target supports it. The \p Flags is used to set appropriate
1215 /// machine flags on the spill instruction e.g. FrameDestroy flag on a callee
1216 /// saved register reload instruction, part of epilogue, during the frame
1217 /// lowering.
1220 int FrameIndex, const TargetRegisterClass *RC, Register VReg,
1221 unsigned SubReg = 0,
1223 llvm_unreachable("Target didn't implement "
1224 "TargetInstrInfo::loadRegFromStackSlot!");
1225 }
1226
  /// This function is called for all pseudo instructions
  /// that remain after register allocation. Many pseudo instructions are
  /// created to help register allocation. This is the place to convert them
  /// into real instructions. The target can edit MI in place, or it can insert
  /// new instructions and erase MI. The function should return true if
  /// anything was changed.
  // Default: no pseudo is expanded and nothing is modified.
  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
1234
  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  /// For example, X86 may want to return true if it can fold
  /// movl (%esp), %eax
  /// subb, %al, ...
  /// Into:
  /// subb (%esp), ...
  ///
  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
  /// reject subregs - but since this behavior used to be enforced in the
  /// target-independent code, moving this responsibility to the targets
  /// has the potential of causing nasty silent breakage in out-of-tree targets.
  // Default: subreg operands are never foldable.
  virtual bool isSubregFoldable() const { return false; }
1248
1249 /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
1250 /// operands which can't be folded into stack references. Operands outside
1251 /// of the range are most likely foldable but it is not guaranteed.
1252 /// These instructions are unique in that stack references for some operands
1253 /// have the same execution cost (e.g. none) as the unfolded register forms.
1254 /// The ranged return is guaranteed to include all operands which can't be
1255 /// folded at zero cost.
1256 virtual std::pair<unsigned, unsigned>
1257 getPatchpointUnfoldableRange(const MachineInstr &MI) const;
1258
1259 /// Attempt to fold a load or store of the specified stack
1260 /// slot into the specified machine instruction for the specified operand(s).
1261 /// If this is possible, a new instruction is returned with the specified
1262 /// operand folded, otherwise NULL is returned.
1263 /// The new instruction is inserted before MI, and the client is responsible
1264 /// for removing the old instruction.
  /// If a copy instruction is created during the fold, it is returned via CopyMI.
1266 /// If VRM is passed, the assigned physregs can be inspected by target to
1267 /// decide on using an opcode (note that those assignments can still change).
1268 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1269 int FI, MachineInstr *&CopyMI,
1270 LiveIntervals *LIS = nullptr,
1271 VirtRegMap *VRM = nullptr) const;
1272
1273 /// Same as the previous version except it allows folding of any load and
1274 /// store from / to any address, not just from a specific stack slot.
1275 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
1276 MachineInstr &LoadMI, MachineInstr *&CopyMI,
1277 LiveIntervals *LIS = nullptr) const;
1278
1279 /// This function defines the logic to lower COPY instruction to
1280 /// target specific instruction(s).
1281 void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const;
1282
1283 /// Return true when there is potentially a faster code sequence
1284 /// for an instruction chain ending in \p Root. All potential patterns are
1285 /// returned in the \p Patterns vector. Patterns should be sorted in priority
1286 /// order since the pattern evaluator stops checking as soon as it finds a
1287 /// faster sequence.
1288 /// \param Root - Instruction that could be combined with one of its operands
1289 /// \param Patterns - Vector of possible combination patterns
1290 virtual bool getMachineCombinerPatterns(MachineInstr &Root,
1291 SmallVectorImpl<unsigned> &Patterns,
1292 bool DoRegPressureReduce) const;
1293
1294 /// Return true if target supports reassociation of instructions in machine
1295 /// combiner pass to reduce register pressure for a given BB.
1296 virtual bool
1298 const RegisterClassInfo *RegClassInfo) const {
1299 return false;
1300 }
1301
1302 /// Fix up the placeholder we may add in genAlternativeCodeSequence().
1303 virtual void
1305 SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
1306
1307 /// Return true when a code sequence can improve throughput. It
1308 /// should be called only for instructions in loops.
1309 /// \param Pattern - combiner pattern
1310 virtual bool isThroughputPattern(unsigned Pattern) const;
1311
1312 /// Return the objective of a combiner pattern.
1313 /// \param Pattern - combiner pattern
1314 virtual CombinerObjective getCombinerObjective(unsigned Pattern) const;
1315
1316 /// Return true if the input \P Inst is part of a chain of dependent ops
1317 /// that are suitable for reassociation, otherwise return false.
1318 /// If the instruction's operands must be commuted to have a previous
1319 /// instruction of the same type define the first source operand, \P Commuted
1320 /// will be set to true.
1321 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
1322
1323 /// Return true when \P Inst is both associative and commutative. If \P Invert
1324 /// is true, then the inverse of \P Inst operation must be tested.
1326 bool Invert = false) const {
1327 return false;
1328 }
1329
1330 /// Find chains of accumulations that can be rewritten as a tree for increased
1331 /// ILP.
1332 bool getAccumulatorReassociationPatterns(
1333 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns) const;
1334
1335 /// Find the chain of accumulator instructions in \P MBB and return them in
1336 /// \P Chain.
1337 void getAccumulatorChain(MachineInstr *CurrentInstr,
1338 SmallVectorImpl<Register> &Chain) const;
1339
  /// Return true when \p Opcode is an instruction which performs
  /// accumulation into one of its operand registers.
  virtual bool isAccumulationOpcode(unsigned Opcode) const { return false; }
1343
  /// Returns an opcode which defines the accumulator used by \p Opcode.
  /// Must be implemented by any target whose isAccumulationOpcode() can
  /// return true; calling the default is a fatal error.
  virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const {
    llvm_unreachable("Function not implemented for target!");
    // Not reached; keeps compilers without a noreturn-aware unreachable happy.
    return 0;
  }
1349
  /// Returns the opcode that should be used to reduce accumulation registers.
  /// Must be implemented by any target using accumulator reassociation;
  /// calling the default is a fatal error.
  virtual unsigned
  getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const {
    llvm_unreachable("Function not implemented for target!");
    // Not reached; keeps compilers without a noreturn-aware unreachable happy.
    return 0;
  }
1356
1357 /// Reduces branches of the accumulator tree into a single register.
1358 void reduceAccumulatorTree(SmallVectorImpl<Register> &RegistersToReduce,
1360 MachineFunction &MF, MachineInstr &Root,
1362 DenseMap<Register, unsigned> &InstrIdxForVirtReg,
1363 Register ResultReg) const;
1364
  /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add
  /// for sub and vice versa).
  virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
    // Default: no opcode has a known inverse.
    return std::nullopt;
  }
1370
1371 /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
1372 bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;
1373
1374 /// Return true when \P Inst has reassociable operands in the same \P MBB.
1375 virtual bool hasReassociableOperands(const MachineInstr &Inst,
1376 const MachineBasicBlock *MBB) const;
1377
1378 /// Return true when \P Inst has reassociable sibling.
1379 virtual bool hasReassociableSibling(const MachineInstr &Inst,
1380 bool &Commuted) const;
1381
1382 /// When getMachineCombinerPatterns() finds patterns, this function generates
1383 /// the instructions that could replace the original code sequence. The client
1384 /// has to decide whether the actual replacement is beneficial or not.
1385 /// \param Root - Instruction that could be combined with one of its operands
1386 /// \param Pattern - Combination pattern for Root
1387 /// \param InsInstrs - Vector of new instructions that implement Pattern
1388 /// \param DelInstrs - Old instructions, including Root, that could be
1389 /// replaced by InsInstr
1390 /// \param InstIdxForVirtReg - map of virtual register to instruction in
1391 /// InsInstr that defines it
1392 virtual void genAlternativeCodeSequence(
1393 MachineInstr &Root, unsigned Pattern,
1396 DenseMap<Register, unsigned> &InstIdxForVirtReg) const;
1397
1398 /// When calculate the latency of the root instruction, accumulate the
1399 /// latency of the sequence to the root latency.
1400 /// \param Root - Instruction that could be combined with one of its operands
1402 return true;
1403 }
1404
1405 /// The returned array encodes the operand index for each parameter because
1406 /// the operands may be commuted; the operand indices for associative
1407 /// operations might also be target-specific. Each element specifies the index
1408 /// of {Prev, A, B, X, Y}.
1409 virtual void
1410 getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern,
1411 std::array<unsigned, 5> &OperandIndices) const;
1412
1413 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
1414 /// reduce critical path length.
1415 void reassociateOps(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern,
1419 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const;
1420
1421 /// Reassociation of some instructions requires inverse operations (e.g.
1422 /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
1423 /// (new root opcode, new prev opcode) that must be used to reassociate \P
  /// Root and \P Prev according to \P Pattern.
1425 std::pair<unsigned, unsigned>
1426 getReassociationOpcodes(unsigned Pattern, const MachineInstr &Root,
1427 const MachineInstr &Prev) const;
1428
  /// The limit on resource length extension we accept in MachineCombiner Pass.
  /// Default 0: combined sequences may not lengthen the critical resource use.
  virtual int getExtendResourceLenLimit() const { return 0; }
1431
  /// This is an architecture-specific helper function of reassociateOps.
  /// Set special operand attributes for new instructions after reassociation.
  /// Default: no attributes need transferring.
  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                                     MachineInstr &NewMI1,
                                     MachineInstr &NewMI2) const {}
1437
  /// Return true when a target supports MachineCombiner. Targets opt in by
  /// overriding; the pass is a no-op for targets that return false.
  virtual bool useMachineCombiner() const { return false; }
1440
1441 /// Return a strategy that MachineCombiner must use when creating traces.
1442 virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const;
1443
  /// Return true if the given SDNode can be copied during scheduling
  /// even if it has glue. Conservative default: glued nodes are not copied.
  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1447
1448protected:
1449 /// Target-dependent implementation for foldMemoryOperand.
1450 /// Target-independent code in foldMemoryOperand will
1451 /// take care of adding a MachineMemOperand to the newly created instruction.
1452 /// The instruction and any auxiliary instructions necessary will be inserted
1453 /// at MI.
1454 virtual MachineInstr *
1456 ArrayRef<unsigned> Ops, int FrameIndex,
1457 MachineInstr *&CopyMI, LiveIntervals *LIS = nullptr,
1458 VirtRegMap *VRM = nullptr) const {
1459 return nullptr;
1460 }
1461
1462 /// Target-dependent implementation for foldMemoryOperand.
1463 /// Target-independent code in foldMemoryOperand will
1464 /// take care of adding a MachineMemOperand to the newly created instruction.
1465 /// The instruction and any auxiliary instructions necessary will be inserted
1466 /// at MI.
1467 virtual MachineInstr *
1470 MachineInstr *&CopyMI,
1471 LiveIntervals *LIS = nullptr) const {
1472 return nullptr;
1473 }
1474
1475 /// Target-dependent implementation of getRegSequenceInputs.
1476 ///
1477 /// \returns true if it is possible to build the equivalent
1478 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1479 ///
1480 /// \pre MI.isRegSequenceLike().
1481 ///
1482 /// \see TargetInstrInfo::getRegSequenceInputs.
1484 const MachineInstr &MI, unsigned DefIdx,
1485 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1486 return false;
1487 }
1488
1489 /// Target-dependent implementation of getExtractSubregInputs.
1490 ///
1491 /// \returns true if it is possible to build the equivalent
1492 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1493 ///
1494 /// \pre MI.isExtractSubregLike().
1495 ///
1496 /// \see TargetInstrInfo::getExtractSubregInputs.
1498 unsigned DefIdx,
1499 RegSubRegPairAndIdx &InputReg) const {
1500 return false;
1501 }
1502
1503 /// Target-dependent implementation of getInsertSubregInputs.
1504 ///
1505 /// \returns true if it is possible to build the equivalent
1506 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1507 ///
1508 /// \pre MI.isInsertSubregLike().
1509 ///
1510 /// \see TargetInstrInfo::getInsertSubregInputs.
1511 virtual bool
1513 RegSubRegPair &BaseReg,
1514 RegSubRegPairAndIdx &InsertedReg) const {
1515 return false;
1516 }
1517
1518public:
1519 /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1520 /// a store or a load and a store into two or more instruction. If this is
1521 /// possible, returns true as well as the new instructions by reference.
1522 virtual bool
1524 bool UnfoldLoad, bool UnfoldStore,
1525 SmallVectorImpl<MachineInstr *> &NewMIs) const {
1526 return false;
1527 }
1528
1530 SmallVectorImpl<SDNode *> &NewNodes) const {
1531 return false;
1532 }
1533
  /// Returns the opcode of the would be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  virtual unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const {
    // Default: no unfolding is supported.
    return 0;
  }
1545
  /// This is used by the pre-regalloc scheduler to determine if two loads are
  /// loading from the same base address. It should only return true if the base
  /// pointers are the same and the only differences between the two addresses
  /// are the offset. It also returns the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1,
                                       int64_t &Offset2) const {
    // Conservative default: never claim a shared base pointer.
    return false;
  }
1555
  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. \p NumLoads is the number of loads
  /// that have already been scheduled after Load1.
  /// The default implementation never clusters loads.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }
1569
  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory. This is a convenience function for callers that are only prepared
  /// to handle a single base operand.
  /// On success \p BaseOp and \p Offset are set; \p OffsetIsScalable reports
  /// whether the offset is scaled by a runtime quantity.
  /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
  /// abstraction that supports negative offsets.
  bool getMemOperandWithOffset(const MachineInstr &MI,
                               const MachineOperand *&BaseOp, int64_t &Offset,
                               bool &OffsetIsScalable,
                               const TargetRegisterInfo *TRI) const;
1579
1580 /// Get zero or more base operands and the byte offset of an instruction that
1581 /// reads/writes memory. Note that there may be zero base operands if the
1582 /// instruction accesses a constant address.
1583 /// It returns false if MI does not read/write memory.
1584 /// It returns false if base operands and offset could not be determined.
1585 /// It is not guaranteed to always recognize base operands and offsets in all
1586 /// cases.
1587 /// FIXME: Move Offset and OffsetIsScalable to some ElementCount-style
1588 /// abstraction that supports negative offsets.
1591 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
1592 const TargetRegisterInfo *TRI) const {
1593 return false;
1594 }
1595
1596 /// Return true if the instruction contains a base register and offset. If
1597 /// true, the function also sets the operand position in the instruction
1598 /// for the base register and offset.
1600 unsigned &BasePos,
1601 unsigned &OffsetPos) const {
1602 return false;
1603 }
1604
1605 /// Target dependent implementation to get the values constituting the address
1606 /// MachineInstr that is accessing memory. These values are returned as a
1607 /// struct ExtAddrMode which contains all relevant information to make up the
1608 /// address.
1609 virtual std::optional<ExtAddrMode>
1611 const TargetRegisterInfo *TRI) const {
1612 return std::nullopt;
1613 }
1614
1615 /// Check if it's possible and beneficial to fold the addressing computation
1616 /// `AddrI` into the addressing mode of the load/store instruction `MemI`. The
1617 /// memory instruction is a user of the virtual register `Reg`, which in turn
1618 /// is the ultimate destination of zero or more COPY instructions from the
1619 /// output register of `AddrI`.
 1620 /// Return the addressing mode after folding in `AM`.
1622 const MachineInstr &AddrI,
1623 ExtAddrMode &AM) const {
1624 return false;
1625 }
1626
1627 /// Emit a load/store instruction with the same value register as `MemI`, but
1628 /// using the address from `AM`. The addressing mode must have been obtained
1629 /// from `canFoldIntoAddr` for the same memory instruction.
1631 const ExtAddrMode &AM) const {
1632 llvm_unreachable("target did not implement emitLdStWithAddr()");
1633 }
1634
1635 /// Returns true if MI's Def is NullValueReg, and the MI
1636 /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
1637 /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
 1638 /// function can return true even if it becomes zero. Specifically cases such as
1639 /// NullValueReg = shl NullValueReg, 63.
1641 const Register NullValueReg,
1642 const TargetRegisterInfo *TRI) const {
1643 return false;
1644 }
1645
  /// If the instruction is an increment of a constant value, return the amount
  /// by reference in \p Value and return true.
  /// The default implementation recognizes no increment instructions.
  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
    return false;
  }
1650
1651 /// Returns true if the two given memory operations should be scheduled
1652 /// adjacent. Note that you have to add:
1653 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1654 /// or
1655 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1656 /// to TargetMachine::createMachineScheduler() to have an effect.
1657 ///
1658 /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
1659 /// \p Offset1 and \p Offset2 are the byte offsets for the memory
1660 /// operations.
1661 /// \p OffsetIsScalable1 and \p OffsetIsScalable2 indicate if the offset is
1662 /// scaled by a runtime quantity.
1663 /// \p ClusterSize is the number of operations in the resulting load/store
1664 /// cluster if this hook returns true.
1665 /// \p NumBytes is the number of bytes that will be loaded from all the
1666 /// clustered loads if this hook returns true.
1668 int64_t Offset1, bool OffsetIsScalable1,
1670 int64_t Offset2, bool OffsetIsScalable2,
1671 unsigned ClusterSize,
1672 unsigned NumBytes) const {
1673 llvm_unreachable("target did not implement shouldClusterMemOps()");
1674 }
1675
1676 /// Reverses the branch condition of the specified condition list,
1677 /// returning false on success and true if it cannot be reversed.
1678 virtual bool
1682
1683 /// Insert a noop into the instruction stream at the specified point.
1684 virtual void insertNoop(MachineBasicBlock &MBB,
1686
1687 /// Insert noops into the instruction stream at the specified point.
1688 virtual void insertNoops(MachineBasicBlock &MBB,
1690 unsigned Quantity) const;
1691
1692 /// Return the noop instruction to use for a noop.
1693 virtual MCInst getNop() const;
1694
1695 /// Return true for post-incremented instructions.
1696 virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1697
1698 /// Returns true if the instruction is already predicated.
1699 virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1700
  /// Assumes the instruction is already predicated and returns true if the
  /// instruction can be predicated again.
  /// The default implementation asserts that \p MI really is predicated and
  /// conservatively refuses a second predication.
  virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
    assert(isPredicated(MI) && "Instruction is not predicated");
    return false;
  }
1707
1708 // Returns a MIRPrinter comment for this machine operand.
1709 virtual std::string
1710 createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
1711 unsigned OpIdx, const TargetRegisterInfo *TRI) const;
1712
1713 /// Returns true if the instruction is a
1714 /// terminator instruction that has not been predicated.
1715 bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1716
1717 /// Returns true if MI is an unconditional tail call.
1718 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1719 return false;
1720 }
1721
1722 /// Returns true if the tail call can be made conditional on BranchCond.
1724 const MachineInstr &TailCall) const {
1725 return false;
1726 }
1727
1728 /// Replace the conditional branch in MBB with a conditional tail call.
1731 const MachineInstr &TailCall) const {
1732 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1733 }
1734
1735 /// Convert the instruction into a predicated instruction.
1736 /// It returns true if the operation was successful.
1737 virtual bool PredicateInstruction(MachineInstr &MI,
1738 ArrayRef<MachineOperand> Pred) const;
1739
1740 /// Returns true if the first specified predicate
1741 /// subsumes the second, e.g. GE subsumes GT.
1743 ArrayRef<MachineOperand> Pred2) const {
1744 return false;
1745 }
1746
1747 /// If the specified instruction defines any predicate
1748 /// or condition code register(s) used for predication, returns true as well
1749 /// as the definition predicate(s) by reference.
1750 /// SkipDead should be set to false at any point that dead
1751 /// predicate instructions should be considered as being defined.
1752 /// A dead predicate instruction is one that is guaranteed to be removed
1753 /// after a call to PredicateInstruction.
1755 std::vector<MachineOperand> &Pred,
1756 bool SkipDead) const {
1757 return false;
1758 }
1759
1760 /// Return true if the specified instruction can be predicated.
1761 /// By default, this returns true for every instruction with a
1762 /// PredicateOperand.
1763 virtual bool isPredicable(const MachineInstr &MI) const {
1764 return MI.getDesc().isPredicable();
1765 }
1766
1767 /// Return true if it's safe to move a machine
1768 /// instruction that defines the specified register class.
1769 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1770 return true;
1771 }
1772
1773 /// Return true if it's safe to move a machine instruction.
1774 /// This allows the backend to prevent certain special instruction
1775 /// sequences from being broken by instruction motion in optimization
1776 /// passes.
1777 /// By default, this returns true for every instruction.
1778 virtual bool isSafeToMove(const MachineInstr &MI,
1779 const MachineBasicBlock *MBB,
1780 const MachineFunction &MF) const {
1781 return true;
1782 }
1783
1784 /// Test if the given instruction should be considered a scheduling boundary.
1785 /// This primarily includes labels and terminators.
1786 virtual bool isSchedulingBoundary(const MachineInstr &MI,
1787 const MachineBasicBlock *MBB,
1788 const MachineFunction &MF) const;
1789
1790 /// Measure the specified inline asm to determine an approximation of its
1791 /// length.
1792 virtual unsigned getInlineAsmLength(
1793 const char *Str, const MCAsmInfo &MAI,
1794 const TargetSubtargetInfo *STI = nullptr) const;
1795
1796 /// Allocate and return a hazard recognizer to use for this target when
1797 /// scheduling the machine instructions before register allocation.
1798 virtual ScheduleHazardRecognizer *
1799 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1800 const ScheduleDAG *DAG) const;
1801
1802 /// Allocate and return a hazard recognizer to use for this target when
1803 /// scheduling the machine instructions before register allocation.
1804 virtual ScheduleHazardRecognizer *
1805 CreateTargetMIHazardRecognizer(const InstrItineraryData *,
1806 const ScheduleDAGMI *DAG) const;
1807
1808 /// Allocate and return a hazard recognizer to use for this target when
1809 /// scheduling the machine instructions after register allocation.
1810 virtual ScheduleHazardRecognizer *
1811 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
1812 const ScheduleDAG *DAG) const;
1813
1814 /// Allocate and return a hazard recognizer to use for by non-scheduling
1815 /// passes.
1816 virtual ScheduleHazardRecognizer *
1818 MachineLoopInfo *MLI) const {
1819 return nullptr;
1820 }
1821
1822 /// Provide a global flag for disabling the PreRA hazard recognizer that
1823 /// targets may choose to honor.
1824 bool usePreRAHazardRecognizer() const;
1825
  /// For a comparison instruction, return the source registers
  /// in \p SrcReg and \p SrcReg2 if having two register operands, and the value
  /// it compares against in \p Value (accompanied by \p Mask; the exact
  /// mask semantics are target-defined — consult target overrides).
  /// Return true if the comparison instruction can be analyzed.
  /// The default implementation cannot analyze any compare.
  virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                              Register &SrcReg2, int64_t &Mask,
                              int64_t &Value) const {
    return false;
  }
1835
1836 /// See if the comparison instruction can be converted
1837 /// into something more efficient. E.g., on ARM most instructions can set the
1838 /// flags register, obviating the need for a separate CMP.
1839 virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
1840 Register SrcReg2, int64_t Mask,
1841 int64_t Value,
1842 const MachineRegisterInfo *MRI) const {
1843 return false;
1844 }
  /// Target hook that attempts to optimize the conditional branch \p MI.
  /// A true return indicates the hook changed the instruction; the default
  /// implementation does nothing and returns false.
  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1846
1847 /// Try to remove the load by folding it to a register operand at the use.
1848 /// We fold the load instructions if and only if the
1849 /// def and use are in the same BB. We only look at one load and see
1850 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1851 /// defined by the load we are trying to fold. DefMI returns the machine
1852 /// instruction that defines FoldAsLoadDefReg, and the function returns
1853 /// the machine instruction generated due to folding. CopyMI returns the
1854 /// copy instruction possibly generated due to folding.
1855 virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
1856 const MachineRegisterInfo *MRI,
1857 Register &FoldAsLoadDefReg,
1859 MachineInstr *&CopyMI) const;
1860
1861 /// 'Reg' is known to be defined by a move immediate instruction,
1862 /// try to fold the immediate into the use instruction.
1863 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1864 /// then the caller may assume that DefMI has been erased from its parent
1865 /// block. The caller may assume that it will not be erased by this
1866 /// function otherwise.
1868 Register Reg, MachineRegisterInfo *MRI) const {
1869 return false;
1870 }
1871
1872 /// Return the number of u-operations the given machine
1873 /// instruction will be decoded to on the target cpu. The itinerary's
1874 /// IssueWidth is the number of microops that can be dispatched each
1875 /// cycle. An instruction with zero microops takes no dispatch resources.
1876 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1877 const MachineInstr &MI) const;
1878
  /// Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  /// This relies on the target-independent opcode numbering: all standard
  /// opcodes up to and including TargetOpcode::COPY are treated as free.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }
1886
1887 virtual std::optional<unsigned>
1888 getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode,
1889 unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const;
1890
1891 /// Compute and return the use operand latency of a given pair of def and use.
1892 /// In most cases, the static scheduling itinerary was enough to determine the
1893 /// operand latency. But it may not be possible for instructions with variable
1894 /// number of defs / uses.
1895 ///
1896 /// This is a raw interface to the itinerary that may be directly overridden
1897 /// by a target. Use computeOperandLatency to get the best estimate of
1898 /// latency.
1899 virtual std::optional<unsigned>
1900 getOperandLatency(const InstrItineraryData *ItinData,
1901 const MachineInstr &DefMI, unsigned DefIdx,
1902 const MachineInstr &UseMI, unsigned UseIdx) const;
1903
1904 /// Compute the instruction latency of a given instruction.
1905 /// If the instruction has higher cost when predicated, it's returned via
1906 /// PredCost.
1907 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1908 const MachineInstr &MI,
1909 unsigned *PredCost = nullptr) const;
1910
1911 virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1912
1913 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1914 SDNode *Node) const;
1915
1916 /// Return the default expected latency for a def based on its opcode.
1917 unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1918 const MachineInstr &DefMI) const;
1919
1920 /// Return true if this opcode has high latency to its result.
1921 virtual bool isHighLatencyDef(int opc) const { return false; }
1922
1923 /// Compute operand latency between a def of 'Reg'
1924 /// and a use in the current loop. Return true if the target considered
1925 /// it 'high'. This is used by optimization passes such as machine LICM to
1926 /// determine whether it makes sense to hoist an instruction out even in a
1927 /// high register pressure situation.
1928 virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1929 const MachineRegisterInfo *MRI,
1930 const MachineInstr &DefMI, unsigned DefIdx,
1931 const MachineInstr &UseMI,
1932 unsigned UseIdx) const {
1933 return false;
1934 }
1935
1936 /// Compute operand latency of a def of 'Reg'. Return true
1937 /// if the target considered it 'low'.
1938 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1939 const MachineInstr &DefMI,
1940 unsigned DefIdx) const;
1941
1942 /// Perform target-specific instruction verification.
1943 virtual bool verifyInstruction(const MachineInstr &MI,
1944 StringRef &ErrInfo) const {
1945 return true;
1946 }
1947
1948 /// Return the current execution domain and bit mask of
1949 /// possible domains for instruction.
1950 ///
1951 /// Some micro-architectures have multiple execution domains, and multiple
1952 /// opcodes that perform the same operation in different domains. For
1953 /// example, the x86 architecture provides the por, orps, and orpd
1954 /// instructions that all do the same thing. There is a latency penalty if a
1955 /// register is written in one domain and read in another.
1956 ///
1957 /// This function returns a pair (domain, mask) containing the execution
1958 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1959 /// function can be used to change the opcode to one of the domains in the
1960 /// bit mask. Instructions whose execution domain can't be changed should
1961 /// return a 0 mask.
1962 ///
1963 /// The execution domain numbers don't have any special meaning except domain
1964 /// 0 is used for instructions that are not associated with any interesting
1965 /// execution domain.
1966 ///
1967 virtual std::pair<uint16_t, uint16_t>
1969 return std::make_pair(0, 0);
1970 }
1971
1972 /// Change the opcode of MI to execute in Domain.
1973 ///
1974 /// The bit (1 << Domain) must be set in the mask returned from
1975 /// getExecutionDomain(MI).
1976 virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1977
1978 /// Returns the preferred minimum clearance
1979 /// before an instruction with an unwanted partial register update.
1980 ///
1981 /// Some instructions only write part of a register, and implicitly need to
1982 /// read the other parts of the register. This may cause unwanted stalls
1983 /// preventing otherwise unrelated instructions from executing in parallel in
1984 /// an out-of-order CPU.
1985 ///
1986 /// For example, the x86 instruction cvtsi2ss writes its result to bits
1987 /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1988 /// the instruction needs to wait for the old value of the register to become
1989 /// available:
1990 ///
1991 /// addps %xmm1, %xmm0
1992 /// movaps %xmm0, (%rax)
1993 /// cvtsi2ss %rbx, %xmm0
1994 ///
1995 /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1996 /// instruction before it can issue, even though the high bits of %xmm0
1997 /// probably aren't needed.
1998 ///
1999 /// This hook returns the preferred clearance before MI, measured in
2000 /// instructions. Other defs of MI's operand OpNum are avoided in the last N
2001 /// instructions before MI. It should only return a positive value for
2002 /// unwanted dependencies. If the old bits of the defined register have
2003 /// useful values, or if MI is determined to otherwise read the dependency,
2004 /// the hook should return 0.
2005 ///
2006 /// The unwanted dependency may be handled by:
2007 ///
2008 /// 1. Allocating the same register for an MI def and use. That makes the
2009 /// unwanted dependency identical to a required dependency.
2010 ///
2011 /// 2. Allocating a register for the def that has no defs in the previous N
2012 /// instructions.
2013 ///
2014 /// 3. Calling breakPartialRegDependency() with the same arguments. This
2015 /// allows the target to insert a dependency breaking instruction.
2016 ///
2017 virtual unsigned
2019 const TargetRegisterInfo *TRI) const {
2020 // The default implementation returns 0 for no partial register dependency.
2021 return 0;
2022 }
2023
2024 /// Return the minimum clearance before an instruction that reads an
2025 /// unused register.
2026 ///
2027 /// For example, AVX instructions may copy part of a register operand into
2028 /// the unused high bits of the destination register.
2029 ///
2030 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
2031 ///
2032 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
2033 /// false dependence on any previous write to %xmm0.
2034 ///
2035 /// This hook works similarly to getPartialRegUpdateClearance, except that it
2036 /// does not take an operand index. Instead sets \p OpNum to the index of the
2037 /// unused register.
2038 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
2039 const TargetRegisterInfo *TRI) const {
2040 // The default implementation returns 0 for no undef register dependency.
2041 return 0;
2042 }
2043
2044 /// Insert a dependency-breaking instruction
2045 /// before MI to eliminate an unwanted dependency on OpNum.
2046 ///
2047 /// If it wasn't possible to avoid a def in the last N instructions before MI
2048 /// (see getPartialRegUpdateClearance), this hook will be called to break the
2049 /// unwanted dependency.
2050 ///
2051 /// On x86, an xorps instruction can be used as a dependency breaker:
2052 ///
2053 /// addps %xmm1, %xmm0
2054 /// movaps %xmm0, (%rax)
2055 /// xorps %xmm0, %xmm0
2056 /// cvtsi2ss %rbx, %xmm0
2057 ///
2058 /// An <imp-kill> operand should be added to MI if an instruction was
2059 /// inserted. This ties the instructions together in the post-ra scheduler.
2060 ///
2061 virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
2062 const TargetRegisterInfo *TRI) const {}
2063
2064 /// Create machine specific model for scheduling.
2065 virtual DFAPacketizer *
2067 return nullptr;
2068 }
2069
2070 /// Sometimes, it is possible for the target
2071 /// to tell, even without aliasing information, that two MIs access different
2072 /// memory addresses. This function returns true if two MIs access different
2073 /// memory addresses and false otherwise.
2074 ///
2075 /// Assumes any physical registers used to compute addresses have the same
2076 /// value for both instructions. (This is the most useful assumption for
2077 /// post-RA scheduling.)
2078 ///
2079 /// See also MachineInstr::mayAlias, which is implemented on top of this
2080 /// function.
2081 virtual bool
2083 const MachineInstr &MIb) const {
2084 assert(MIa.mayLoadOrStore() &&
2085 "MIa must load from or modify a memory location");
2086 assert(MIb.mayLoadOrStore() &&
2087 "MIb must load from or modify a memory location");
2088 return false;
2089 }
2090
2091 /// Return the value to use for the MachineCSE's LookAheadLimit,
2092 /// which is a heuristic used for CSE'ing phys reg defs.
2093 virtual unsigned getMachineCSELookAheadLimit() const {
2094 // The default lookahead is small to prevent unprofitable quadratic
2095 // behavior.
2096 return 5;
2097 }
2098
2099 /// Return the maximal number of alias checks on memory operands. For
2100 /// instructions with more than one memory operands, the alias check on a
2101 /// single MachineInstr pair has quadratic overhead and results in
2102 /// unacceptable performance in the worst case. The limit here is to clamp
2103 /// that maximal checks performed. Usually, that's the product of memory
2104 /// operand numbers from that pair of MachineInstr to be checked. For
2105 /// instance, with two MachineInstrs with 4 and 5 memory operands
2106 /// correspondingly, a total of 20 checks are required. With this limit set to
2107 /// 16, their alias check is skipped. We choose to limit the product instead
2108 /// of the individual instruction as targets may have special MachineInstrs
2109 /// with a considerably high number of memory operands, such as `ldm` in ARM.
2110 /// Setting this limit per MachineInstr would result in either too high
2111 /// overhead or too rigid restriction.
2112 virtual unsigned getMemOperandAACheckLimit() const { return 16; }
2113
2114 /// Return an array that contains the ids of the target indices (used for the
2115 /// TargetIndex machine operand) and their names.
2116 ///
2117 /// MIR Serialization is able to serialize only the target indices that are
2118 /// defined by this method.
2121 return {};
2122 }
2123
2124 /// Decompose the machine operand's target flags into two values - the direct
2125 /// target flag value and any of bit flags that are applied.
2126 virtual std::pair<unsigned, unsigned>
2128 return std::make_pair(0u, 0u);
2129 }
2130
2131 /// Return an array that contains the direct target flag values and their
2132 /// names.
2133 ///
2134 /// MIR Serialization is able to serialize only the target flags that are
2135 /// defined by this method.
2138 return {};
2139 }
2140
2141 /// Return an array that contains the bitmask target flag values and their
2142 /// names.
2143 ///
2144 /// MIR Serialization is able to serialize only the target flags that are
2145 /// defined by this method.
2148 return {};
2149 }
2150
2151 /// Return an array that contains the MMO target flag values and their
2152 /// names.
2153 ///
2154 /// MIR Serialization is able to serialize only the MMO target flags that are
2155 /// defined by this method.
2158 return {};
2159 }
2160
2161 /// Determines whether \p Inst is a tail call instruction. Override this
2162 /// method on targets that do not properly set MCID::Return and MCID::Call on
2163 /// tail call instructions."
2164 virtual bool isTailCall(const MachineInstr &Inst) const {
2165 return Inst.isReturn() && Inst.isCall();
2166 }
2167
2168 /// True if the instruction is bound to the top of its basic block and no
2169 /// other instructions shall be inserted before it. This can be implemented
2170 /// to prevent register allocator to insert spills for \p Reg before such
2171 /// instructions.
2173 Register Reg = Register()) const {
2174 return false;
2175 }
2176
 2177 /// Allows targets to use an appropriate copy instruction while splitting the live
2178 /// range of a register in register allocation.
2180 const MachineFunction &MF) const {
2181 return TargetOpcode::COPY;
2182 }
2183
 2184 /// During PHI elimination, lets the target make necessary checks and
2185 /// insert the copy to the PHI destination register in a target specific
2186 /// manner.
2189 const DebugLoc &DL, Register Src, Register Dst) const {
2190 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2191 .addReg(Src);
2192 }
2193
 2194 /// During PHI elimination, lets the target make necessary checks and
2195 /// insert the copy to the PHI destination register in a target specific
2196 /// manner.
2199 const DebugLoc &DL, Register Src,
2200 unsigned SrcSubReg,
2201 Register Dst) const {
2202 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
2203 .addReg(Src, {}, SrcSubReg);
2204 }
2205
2206 /// Returns a \p outliner::OutlinedFunction struct containing target-specific
2207 /// information for a set of outlining candidates. Returns std::nullopt if the
2208 /// candidates are not suitable for outlining. \p MinRepeats is the minimum
2209 /// number of times the instruction sequence must be repeated.
2210 virtual std::optional<std::unique_ptr<outliner::OutlinedFunction>>
2212 const MachineModuleInfo &MMI,
2213 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
2214 unsigned MinRepeats) const {
2216 "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
2217 }
2218
2219 /// Optional target hook to create the LLVM IR attributes for the outlined
2220 /// function. If overridden, the overriding function must call the default
2221 /// implementation.
2222 virtual void mergeOutliningCandidateAttributes(
2223 Function &F, std::vector<outliner::Candidate> &Candidates) const;
2224
2225protected:
2226 /// Target-dependent implementation for getOutliningTypeImpl.
2227 virtual outliner::InstrType
2229 MachineBasicBlock::iterator &MIT, unsigned Flags) const {
2231 "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
2232 }
2233
2234public:
2235 /// Returns how or if \p MIT should be outlined. \p Flags is the
2236 /// target-specific information returned by isMBBSafeToOutlineFrom.
2237 outliner::InstrType getOutliningType(const MachineModuleInfo &MMI,
2239 unsigned Flags) const;
2240
2241 /// Optional target hook that returns true if \p MBB is safe to outline from,
2242 /// and returns any target-specific information in \p Flags.
2243 virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
2244 unsigned &Flags) const;
2245
2246 /// Optional target hook which partitions \p MBB into outlinable ranges for
2247 /// instruction mapping purposes. Each range is defined by two iterators:
2248 /// [start, end).
2249 ///
2250 /// Ranges are expected to be ordered top-down. That is, ranges closer to the
2251 /// top of the block should come before ranges closer to the end of the block.
2252 ///
2253 /// Ranges cannot overlap.
2254 ///
2255 /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end())
2256 ///
2257 /// All instructions not present in an outlinable range are considered
2258 /// illegal.
2259 virtual SmallVector<
2260 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
2261 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
2262 return {std::make_pair(MBB.begin(), MBB.end())};
2263 }
2264
2265 /// Insert a custom frame for outlined functions.
2267 const outliner::OutlinedFunction &OF) const {
2269 "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
2270 }
2271
2272 /// Insert a call to an outlined function into the program.
2273 /// Returns an iterator to the spot where we inserted the call. This must be
2274 /// implemented by the target.
2278 outliner::Candidate &C) const {
2280 "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
2281 }
2282
2283 /// Insert an architecture-specific instruction to clear a register. If you
2284 /// need to avoid sideeffects (e.g. avoid XOR on x86, which sets EFLAGS), set
2285 /// \p AllowSideEffects to \p false.
2288 DebugLoc &DL,
2289 bool AllowSideEffects = true) const {
2290#if 0
2291 // FIXME: This should exist once all platforms that use stack protectors
2292 // implements it.
2294 "Target didn't implement TargetInstrInfo::buildClearRegister!");
2295#endif
2296 }
2297
2298 /// Return true if the function can safely be outlined from.
2299 /// A function \p MF is considered safe for outlining if an outlined function
2300 /// produced from instructions in F will produce a program which produces the
2301 /// same output for any set of given inputs.
2303 bool OutlineFromLinkOnceODRs) const {
2304 llvm_unreachable("Target didn't implement "
2305 "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
2306 }
2307
2308 /// Return true if the function should be outlined from by default.
2310 return false;
2311 }
2312
2313 /// Return true if the function is a viable candidate for machine function
2314 /// splitting. The criteria for if a function can be split may vary by target.
2315 virtual bool isFunctionSafeToSplit(const MachineFunction &MF) const;
2316
2317 /// Return true if the MachineBasicBlock can safely be split to the cold
2318 /// section. On AArch64, certain instructions may cause a block to be unsafe
2319 /// to split to the cold section.
2320 virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const {
2321 return true;
2322 }
2323
2324 /// Produce the expression describing the \p MI loading a value into
2325 /// the physical register \p Reg. This hook should only be used with
2326 /// \p MIs belonging to VReg-less functions.
2327 virtual std::optional<ParamLoadedValue>
2328 describeLoadedValue(const MachineInstr &MI, Register Reg) const;
2329
2330 /// Given the generic extension instruction \p ExtMI, returns true if this
2331 /// extension is a likely candidate for being folded into an another
2332 /// instruction.
2334 MachineRegisterInfo &MRI) const {
2335 return false;
2336 }
2337
  /// Return MIR formatter to format/parse MIR operands. Target can override
  /// this virtual function and return target specific MIR formatter.
  /// The default implementation lazily constructs a generic MIRFormatter on
  /// first use and caches it in the mutable Formatter member.
  /// NOTE(review): the lazy initialization is unsynchronized — presumably only
  /// called single-threaded; confirm before concurrent use.
  virtual const MIRFormatter *getMIRFormatter() const {
    if (!Formatter)
      Formatter = std::make_unique<MIRFormatter>();
    return Formatter.get();
  }
2345
2346 /// Returns the target-specific default value for tail duplication.
2347 /// This value will be used if the tail-dup-placement-threshold argument is
2348 /// not provided.
2349 virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
2350 return OptLevel >= CodeGenOptLevel::Aggressive ? 4 : 2;
2351 }
2352
2353 /// Returns the target-specific default value for tail merging.
2354 /// This value will be used if the tail-merge-size argument is not provided.
2355 virtual unsigned getTailMergeSize(const MachineFunction &MF) const {
2356 return 3;
2357 }
2358
  /// Returns the callee operand from the given \p MI.
  /// STATEPOINT/STACKMAP/PATCHPOINT pseudos carry the callee at operand
  /// index 3; every other call is assumed to carry it at operand 0. Targets
  /// with a different convention must override this hook.
  virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
    assert(MI.isCall());

    switch (MI.getOpcode()) {
    case TargetOpcode::STATEPOINT:
    case TargetOpcode::STACKMAP:
    case TargetOpcode::PATCHPOINT:
      return MI.getOperand(3);
    default:
      return MI.getOperand(0);
    }

    // Never reached: both switch paths return above.
    llvm_unreachable("impossible call instruction");
  }
2374
2375 /// Return the uniformity behavior of the given value.
2379
  /// Returns true if the given \p MI defines a TargetIndex operand that can be
  /// tracked by their offset, can have values, and can have debug info
  /// associated with it. If so, sets \p Index and \p Offset of the target index
  /// operand.
  /// The default implementation recognizes no such instructions.
  virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index,
                                        int64_t &Offset) const {
    return false;
  }
2388
2389 // Get the call frame size just before MI.
2390 unsigned getCallFrameSizeAt(MachineInstr &MI) const;
2391
2392 /// Fills in the necessary MachineOperands to refer to a frame index.
2393 /// The best way to understand this is to print `asm(""::"m"(x));` after
2394 /// finalize-isel. Example:
2395 /// INLINEASM ... 262190 /* mem:m */, %stack.0.x.addr, 1, $noreg, 0, $noreg
2396 /// we would add placeholders for: ^ ^ ^ ^
2398 int FI) const {
2399 llvm_unreachable("unknown number of operands necessary");
2400 }
2401
2402 /// Inserts a code prefetch instruction before `InsertBefore` in block `MBB`
 2403 /// targeting `GV`.
2404 virtual MachineInstr *
2406 MachineBasicBlock::iterator InsertBefore,
2407 const GlobalValue *GV) const {
2408 llvm_unreachable("target did not implement");
2409 }
2410
2411private:
2412 mutable std::unique_ptr<MIRFormatter> Formatter;
2413 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
2414 unsigned CatchRetOpcode;
2415 unsigned ReturnOpcode;
2416};
2417
2418/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
2422
2424 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
2425 SubRegInfo::getEmptyKey());
2426 }
2427
2429 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
2430 SubRegInfo::getTombstoneKey());
2431 }
2432
2433 /// Reuse getHashValue implementation from
2434 /// std::pair<unsigned, unsigned>.
2435 static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
2437 std::make_pair(Val.Reg, Val.SubReg));
2438 }
2439
2442 return LHS == RHS;
2443 }
2444};
2445
2446} // end namespace llvm
2447
2448#endif // LLVM_CODEGEN_TARGETINSTRINFO_H
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
static const TargetRegisterClass * getRegClass(const MachineInstr &MI, Register Reg)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define LLVM_ABI
Definition Compiler.h:213
DXIL Forward Handle Accesses
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
static bool isGlobalMemoryObject(MachineInstr *MI)
Return true if MI is an instruction we are unable to reason about (like something with unmodeled memo...
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Contains all data structures shared between the outliner implemented in MachineOutliner....
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getInstSizeInBytes(const MachineInstr &MI, const SystemZInstrInfo *TII)
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
A debug info location.
Definition DebugLoc.h:123
Itinerary data supplied by a subtarget to be used by a target.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
Describe properties that are true of each instruction in the target description file.
Interface to description of machine instruction set.
Definition MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:86
MIRFormater - Interface to format MIR operand based on target.
MachineInstrBundleIterator< MachineInstr > iterator
Representation of each machine instruction.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCall(QueryType Type=AnyInBundle) const
A description of a memory reference used in the backend.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
static MachineOperand CreateImm(int64_t Val)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Represents one node in the SelectionDAG.
This class represents the scheduled code.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
virtual bool isMVEExpanderSupported()
Return true if the target can expand pipelined schedule with modulo variable expansion.
virtual void createRemainingIterationsGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, DenseMap< MachineInstr *, MachineInstr * > &LastStage0Insts)
Create a condition to determine if the remaining trip count for a phase is greater than TC.
virtual void adjustTripCount(int TripCountAdjust)=0
Modify the loop such that the trip count is OriginalTC + TripCountAdjust.
virtual void disposed(LiveIntervals *LIS=nullptr)
Called when the loop is being removed.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const =0
Return true if the given instruction should not be pipelined and should be ignored.
virtual void setPreheader(MachineBasicBlock *NewPreheader)=0
Called when the loop's preheader has been modified to NewPreheader.
virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
Return true if the proposed schedule should used.
virtual std::optional< bool > createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond)=0
Create a condition to determine if the trip count of the loop is greater than TC, where TC is always ...
TargetInstrInfo - Interface to description of machine instruction set.
virtual SmallVector< std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > > getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook which partitions MBB into outlinable ranges for instruction mapping purposes.
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it's profitable to predicate instructions with accumulated instruction latency of "Num...
virtual bool isBasicBlockPrologue(const MachineInstr &MI, Register Reg=Register()) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
virtual std::unique_ptr< PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
virtual ValueUniformity getValueUniformity(const MachineInstr &MI) const
Return the uniformity behavior of the given value.
virtual bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const
Assumes the instruction is already predicated and returns true if the instruction can be predicated a...
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don't consume any machine resources in their current form.
virtual void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const
Insert an architecture-specific instruction to clear a register.
virtual void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const
Fills in the necessary MachineOperands to refer to a frame index.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const
Given the generic extension instruction ExtMI, returns true if this extension is a likely candidate f...
virtual bool isSafeToSink(MachineInstr &MI, MachineBasicBlock *SuccToSinkTo, MachineCycleInfo *CI) const
const TargetRegisterInfo & TRI
virtual std::optional< DestSourcePair > isCopyLikeInstrImpl(const MachineInstr &MI) const
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr * > &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
bool isTriviallyReMaterializable(const MachineInstr &MI) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
virtual void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
int16_t getOpRegClassID(const MCOperandInfo &OpInfo) const
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &NewMIs, bool PreferFalse=false) const
Given an instruction marked as isSelect = true, attempt to optimize MI by merging it with one of its ...
virtual bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const
Check if it's possible and beneficial to fold the addressing computation AddrI into the addressing mo...
virtual const MIRFormatter * getMIRFormatter() const
Return MIR formatter to format/parse MIR operands.
bool isReMaterializable(const MachineInstr &MI) const
Return true if the instruction would be materializable at a point in the containing function where al...
virtual InstSizeVerifyMode getInstSizeVerifyMode(const MachineInstr &MI) const
Determine whether/how the instruction size returned by getInstSizeInBytes() should be verified.
virtual bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const
Return true if target supports reassociation of instructions in machine combiner pass to reduce regis...
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
bool isFullCopyInstr(const MachineInstr &MI) const
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const
Returns true if MI's Def is NullValueReg, and the MI does not change the Zero value.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
virtual void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const
Fix up the placeholder we may add in genAlternativeCodeSequence().
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of 'Reg' and a use in the current loop.
bool isUnspillableTerminator(const MachineInstr *MI) const
Return true if the given instruction is terminator that is unspillable, according to isUnspillableTer...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it's profitable to unpredicate one side of a 'diamond', i.e.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
virtual int getExtendResourceLenLimit() const
The limit on resource length extension we accept in MachineCombiner Pass.
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true,...
virtual bool shouldBreakCriticalEdgeToSink(MachineInstr &MI) const
For a "cheap" instruction which doesn't enable additional sinking, should MachineSink break a critica...
virtual bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const
Sometimes, it is possible for the target to tell, even without aliasing information,...
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
unsigned getReturnOpcode() const
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Store the specified register of the given register class to the specified stack frame index.
virtual bool isIgnorableUse(const MachineOperand &MO) const
Given MO is a PhysReg use return if it can be ignored for the purpose of instruction rematerializatio...
virtual unsigned getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const
Returns the opcode that should be use to reduce accumulation registers.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
virtual bool shouldPostRASink(const MachineInstr &MI) const
virtual bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const
Returns true if the two given memory operations should be scheduled adjacent.
virtual unsigned getLiveRangeSplitOpcode(Register Reg, const MachineFunction &MF) const
Allows targets to use appropriate copy instruction while spilitting live range of a register in regis...
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient.
virtual unsigned getMemOperandAACheckLimit() const
Return the maximal number of alias checks on memory operands.
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
virtual bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const
Return true if the MachineBasicBlock can safely be split to the cold section.
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u, const int16_t *const RegClassByHwModeTable=nullptr)
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const
Emit instructions to copy a pair of physical registers.
virtual unsigned getAccumulationStartOpcode(unsigned Opcode) const
Returns an opcode which defines the accumulator used by \P Opcode.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
virtual bool simplifyInstruction(MachineInstr &MI) const
If possible, converts the instruction to a simplified/canonical form.
virtual std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const
Target dependent implementation to get the values constituting the address MachineInstr that is acces...
virtual std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const
Target-dependent implementation for IsCopyInstr.
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI eleimination lets target to make necessary checks and insert the copy to the PHI destinati...
virtual bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const
Returns true if MI is an instruction that defines Reg to have a constant value and the value is recor...
static bool isGenericOpcode(unsigned Opc)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
const TargetRegisterInfo & getRegisterInfo() const
std::optional< DestSourcePair > isCopyLikeInstr(const MachineInstr &MI) const
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr * insertCodePrefetchInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const GlobalValue *GV) const
Inserts a code prefetch instruction before InsertBefore in block MBB targetting GV.
virtual Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF, MachineLoopInfo *MLI) const
Allocate and return a hazard recognizer to use for by non-scheduling passes.
@ AllowOverEstimate
Allow the reported instruction size to be larger than the actual size.
@ NoVerify
Do not verify instruction size.
@ ExactSize
Check that the instruction size matches exactly.
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understood.
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineInstr &LoadMI, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
unsigned getCatchReturnOpcode() const
virtual unsigned getTailMergeSize(const MachineFunction &MF) const
Returns the target-specific default value for tail merging.
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
const int16_t *const RegClassByHwMode
Subtarget specific sub-array of MCInstrInfo's RegClassByHwModeTables (i.e.
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const
'Reg' is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual int getJumpTableIndex(const MachineInstr &MI) const
Return an index for MachineJumpTableInfo if insn is an indirect jump using a jump table,...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index, int64_t &Offset) const
Returns true if the given MI defines a TargetIndex operand that can be tracked by their offset,...
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const
Allow targets to tell MachineVerifier whether a specific register MachineOperand can be used as part ...
virtual std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const
Returns a outliner::OutlinedFunction struct containing target-specific information for a set of outli...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI eleimination lets target to make necessary checks and insert the copy to the PHI destinati...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
TargetInstrInfo(const TargetInstrInfo &)=delete
virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const
Return an estimate for the code size reduction (in bytes) which will be caused by removing the given ...
virtual ~TargetInstrInfo()
virtual bool isAccumulationOpcode(unsigned Opcode) const
Return true when \P OpCode is an instruction which performs accumulation into one of its operand regi...
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const
Return the increase in code size needed to predicate a contiguous run of NumInsts instructions.
virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const
When calculate the latency of the root instruction, accumulate the latency of the sequence to the roo...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack.
virtual Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack.
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it's profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const
Load the specified register of the given register class from the specified stack frame index.
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand's target flags into two values - the direct target flag value and any o...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it's safe to move a machine instruction that defines the specified register class.
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const
Return true if the given terminator MI is not expected to spill.
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a register,...
static bool isGenericAtomicRMWOpcode(unsigned Opc)
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const
Returns true if the target has a preference on the operands order of the given machine instruction.
static const unsigned CommuteAnyOperandIndex
virtual bool isSafeToMove(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Return true if it's safe to move a machine instruction.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
virtual MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const
Emit a load/store instruction with the same value register as MemI, but using the address from AM.
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual bool shouldHoist(const MachineInstr &MI, const MachineLoop *FromLoop) const
Return false if the instruction should not be hoisted by MachineLICM.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
virtual unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const
Returns the target-specific default value for tail duplication.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base address.
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode * > &NewNodes) const
virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE's LookAheadLimit, which is a heuristic used for CSE'ing phi nodes.
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it's legal to split the given basic block at the specified instruction (i.e. the instruction would be the start of a new basic block).
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
static constexpr TypeSize getZero()
Definition TypeSize.h:349
LLVM Value Representation.
Definition Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
MachineTraceStrategy
Strategies for selecting traces.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
DWARFExpression::Operation Op
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
ValueUniformity
Enum describing how values behave with respect to uniformity and divergence, to answer the question: ...
Definition Uniformity.h:18
@ Default
The result value is uniform if and only if all operands are uniform.
Definition Uniformity.h:20
#define N
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
static TargetInstrInfo::RegSubRegPair getEmptyKey()
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
An information struct used to provide DenseMap with the various necessary components for a given value type T.
const MachineOperand * Source
DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
const MachineOperand * Destination
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
ExtAddrMode()=default
static constexpr LaneBitmask getAll()
Definition LaneBitmask.h:82
Machine model for scheduling, bundling, and heuristics.
Definition MCSchedule.h:258
RegImmPair(Register Reg, int64_t Imm)
Represents a predicate at the MachineFunction level.
bool SingleUseCondition
SingleUseCondition is true if ConditionDef is dead except for the branch(es) at the end of the basic block.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
RegSubRegPairAndIdx(Register Reg=Register(), unsigned SubReg=0, unsigned SubIdx=0)
A pair composed of a register and a sub-register index.
bool operator==(const RegSubRegPair &P) const
RegSubRegPair(Register Reg=Register(), unsigned SubReg=0)
bool operator!=(const RegSubRegPair &P) const
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.