LLVM 17.0.0git
TargetInstrInfo.h
Go to the documentation of this file.
1//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the target machine instruction set to the code generator.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
14#define LLVM_CODEGEN_TARGETINSTRINFO_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Uniformity.h"
29#include "llvm/MC/MCInstrInfo.h"
32#include <cassert>
33#include <cstddef>
34#include <cstdint>
35#include <utility>
36#include <vector>
37
38namespace llvm {
39
40class DFAPacketizer;
41class InstrItineraryData;
42class LiveIntervals;
43class LiveVariables;
44class MachineLoop;
45class MachineMemOperand;
46class MachineRegisterInfo;
47class MCAsmInfo;
48class MCInst;
49struct MCSchedModel;
50class Module;
51class ScheduleDAG;
52class ScheduleDAGMI;
53class ScheduleHazardRecognizer;
54class SDNode;
55class SelectionDAG;
56class SMSchedule;
57class SwingSchedulerDAG;
58class RegScavenger;
59class TargetRegisterClass;
60class TargetRegisterInfo;
61class TargetSchedModel;
62class TargetSubtargetInfo;
63enum class MachineCombinerPattern;
64
65template <class T> class SmallVectorImpl;
66
67using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;
68
72
74 : Destination(&Dest), Source(&Src) {}
75};
76
77/// Used to describe a register and immediate addition.
78struct RegImmPair {
80 int64_t Imm;
81
82 RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
83};
84
85/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
86/// It holds the register values, the scale value and the displacement.
90 int64_t Scale;
91 int64_t Displacement;
92};
93
94//---------------------------------------------------------------------------
95///
96/// TargetInstrInfo - Interface to description of machine instruction set
97///
99public:
100 TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
101 unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
102 : CallFrameSetupOpcode(CFSetupOpcode),
103 CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
104 ReturnOpcode(ReturnOpcode) {}
108
109 static bool isGenericOpcode(unsigned Opc) {
110 return Opc <= TargetOpcode::GENERIC_OP_END;
111 }
112
113 static bool isGenericAtomicRMWOpcode(unsigned Opc) {
114 return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
115 Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
116 }
117
118 /// Given a machine instruction descriptor, returns the register
119 /// class constraint for OpNum, or NULL.
120 virtual
121 const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
122 const TargetRegisterInfo *TRI,
123 const MachineFunction &MF) const;
124
125 /// Return true if the instruction is trivially rematerializable, meaning it
126 /// has no side effects and requires no operands that aren't always available.
127 /// This means the only allowed uses are constants and unallocatable physical
128 /// registers so that the instructions result is independent of the place
129 /// in the function.
131 return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
132 (MI.getDesc().isRematerializable() &&
134 isReallyTriviallyReMaterializableGeneric(MI)));
135 }
136
137 /// Given \p MO is a PhysReg use return if it can be ignored for the purpose
138 /// of instruction rematerialization or sinking.
139 virtual bool isIgnorableUse(const MachineOperand &MO) const {
140 return false;
141 }
142
143protected:
144 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
145 /// set, this hook lets the target specify whether the instruction is actually
146 /// trivially rematerializable, taking into consideration its operands. This
147 /// predicate must return false if the instruction has any side effects other
148 /// than producing a value, or if it requires any address registers that are
149 /// not always available.
150 /// Requirements must be checked as stated in isTriviallyReMaterializable().
152 return false;
153 }
154
155 /// This method commutes the operands of the given machine instruction MI.
156 /// The operands to be commuted are specified by their indices OpIdx1 and
157 /// OpIdx2.
158 ///
159 /// If a target has any instructions that are commutable but require
160 /// converting to different instructions or making non-trivial changes
161 /// to commute them, this method can be overloaded to do that.
162 /// The default implementation simply swaps the commutable operands.
163 ///
164 /// If NewMI is false, MI is modified in place and returned; otherwise, a
165 /// new machine instruction is created and returned.
166 ///
167 /// Do not call this method for a non-commutable instruction.
168 /// Even though the instruction is commutable, the method may still
169 /// fail to commute the operands, null pointer is returned in such cases.
171 unsigned OpIdx1,
172 unsigned OpIdx2) const;
173
174 /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
175 /// operand indices to (ResultIdx1, ResultIdx2).
176 /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
177 /// predefined to some indices or be undefined (designated by the special
178 /// value 'CommuteAnyOperandIndex').
179 /// The predefined result indices cannot be re-defined.
180 /// The function returns true iff after the result pair redefinition
181 /// the fixed result pair is equal to or equivalent to the source pair of
182 /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
183 /// the pairs (x,y) and (y,x) are equivalent.
184 static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
185 unsigned CommutableOpIdx1,
186 unsigned CommutableOpIdx2);
187
188private:
189 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
190 /// set and the target hook isReallyTriviallyReMaterializable returns false,
191 /// this function does target-independent tests to determine if the
192 /// instruction is really trivially rematerializable.
193 bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI) const;
194
195public:
196 /// These methods return the opcode of the frame setup/destroy instructions
197 /// if they exist (-1 otherwise). Some targets use pseudo instructions in
198 /// order to abstract away the difference between operating with a frame
199 /// pointer and operating without, through the use of these two instructions.
200 ///
201 unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
202 unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
203
204 /// Returns true if the argument is a frame pseudo instruction.
205 bool isFrameInstr(const MachineInstr &I) const {
206 return I.getOpcode() == getCallFrameSetupOpcode() ||
207 I.getOpcode() == getCallFrameDestroyOpcode();
208 }
209
210 /// Returns true if the argument is a frame setup pseudo instruction.
211 bool isFrameSetup(const MachineInstr &I) const {
212 return I.getOpcode() == getCallFrameSetupOpcode();
213 }
214
215 /// Returns size of the frame associated with the given frame instruction.
216 /// For frame setup instruction this is frame that is set up space set up
217 /// after the instruction. For frame destroy instruction this is the frame
218 /// freed by the caller.
219 /// Note, in some cases a call frame (or a part of it) may be prepared prior
220 /// to the frame setup instruction. It occurs in the calls that involve
221 /// inalloca arguments. This function reports only the size of the frame part
222 /// that is set up between the frame setup and destroy pseudo instructions.
223 int64_t getFrameSize(const MachineInstr &I) const {
224 assert(isFrameInstr(I) && "Not a frame instruction");
225 assert(I.getOperand(0).getImm() >= 0);
226 return I.getOperand(0).getImm();
227 }
228
229 /// Returns the total frame size, which is made up of the space set up inside
230 /// the pair of frame start-stop instructions and the space that is set up
231 /// prior to the pair.
232 int64_t getFrameTotalSize(const MachineInstr &I) const {
233 if (isFrameSetup(I)) {
234 assert(I.getOperand(1).getImm() >= 0 &&
235 "Frame size must not be negative");
236 return getFrameSize(I) + I.getOperand(1).getImm();
237 }
238 return getFrameSize(I);
239 }
240
241 unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
242 unsigned getReturnOpcode() const { return ReturnOpcode; }
243
244 /// Returns the actual stack pointer adjustment made by an instruction
245 /// as part of a call sequence. By default, only call frame setup/destroy
246 /// instructions adjust the stack, but targets may want to override this
247 /// to enable more fine-grained adjustment, or adjust by a different value.
248 virtual int getSPAdjust(const MachineInstr &MI) const;
249
250 /// Return true if the instruction is a "coalescable" extension instruction.
251 /// That is, it's like a copy where it's legal for the source to overlap the
252 /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
253 /// expected the pre-extension value is available as a subreg of the result
254 /// register. This also returns the sub-register index in SubIdx.
255 virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
256 Register &DstReg, unsigned &SubIdx) const {
257 return false;
258 }
259
260 /// If the specified machine instruction is a direct
261 /// load from a stack slot, return the virtual or physical register number of
262 /// the destination along with the FrameIndex of the loaded stack slot. If
263 /// not, return 0. This predicate must return 0 if the instruction has
264 /// any side effects other than loading from the stack slot.
265 virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
266 int &FrameIndex) const {
267 return 0;
268 }
269
270 /// Optional extension of isLoadFromStackSlot that returns the number of
271 /// bytes loaded from the stack. This must be implemented if a backend
272 /// supports partial stack slot spills/loads to further disambiguate
273 /// what the load does.
274 virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
275 int &FrameIndex,
276 unsigned &MemBytes) const {
277 MemBytes = 0;
278 return isLoadFromStackSlot(MI, FrameIndex);
279 }
280
281 /// Check for post-frame ptr elimination stack locations as well.
282 /// This uses a heuristic so it isn't reliable for correctness.
284 int &FrameIndex) const {
285 return 0;
286 }
287
288 /// If the specified machine instruction has a load from a stack slot,
289 /// return true along with the FrameIndices of the loaded stack slot and the
290 /// machine mem operands containing the reference.
291 /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
292 /// any instructions that loads from the stack. This is just a hint, as some
293 /// cases may be missed.
294 virtual bool hasLoadFromStackSlot(
295 const MachineInstr &MI,
297
298 /// If the specified machine instruction is a direct
299 /// store to a stack slot, return the virtual or physical register number of
300 /// the source reg along with the FrameIndex of the loaded stack slot. If
301 /// not, return 0. This predicate must return 0 if the instruction has
302 /// any side effects other than storing to the stack slot.
303 virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
304 int &FrameIndex) const {
305 return 0;
306 }
307
308 /// Optional extension of isStoreToStackSlot that returns the number of
309 /// bytes stored to the stack. This must be implemented if a backend
310 /// supports partial stack slot spills/loads to further disambiguate
311 /// what the store does.
312 virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
313 int &FrameIndex,
314 unsigned &MemBytes) const {
315 MemBytes = 0;
316 return isStoreToStackSlot(MI, FrameIndex);
317 }
318
319 /// Check for post-frame ptr elimination stack locations as well.
320 /// This uses a heuristic, so it isn't reliable for correctness.
321 virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
322 int &FrameIndex) const {
323 return 0;
324 }
325
326 /// If the specified machine instruction has a store to a stack slot,
327 /// return true along with the FrameIndices of the loaded stack slot and the
328 /// machine mem operands containing the reference.
329 /// If not, return false. Unlike isStoreToStackSlot,
330 /// this returns true for any instructions that stores to the
331 /// stack. This is just a hint, as some cases may be missed.
332 virtual bool hasStoreToStackSlot(
333 const MachineInstr &MI,
335
336 /// Return true if the specified machine instruction
337 /// is a copy of one stack slot to another and has no other effect.
338 /// Provide the identity of the two frame indices.
339 virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
340 int &SrcFrameIndex) const {
341 return false;
342 }
343
344 /// Compute the size in bytes and offset within a stack slot of a spilled
345 /// register or subregister.
346 ///
347 /// \param [out] Size in bytes of the spilled value.
348 /// \param [out] Offset in bytes within the stack slot.
349 /// \returns true if both Size and Offset are successfully computed.
350 ///
351 /// Not all subregisters have computable spill slots. For example,
352 /// subregisters registers may not be byte-sized, and a pair of discontiguous
353 /// subregisters has no single offset.
354 ///
355 /// Targets with nontrivial bigendian implementations may need to override
356 /// this, particularly to support spilled vector registers.
357 virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
358 unsigned &Size, unsigned &Offset,
359 const MachineFunction &MF) const;
360
361 /// Return true if the given instruction is terminator that is unspillable,
362 /// according to isUnspillableTerminatorImpl.
364 return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
365 }
366
367 /// Returns the size in bytes of the specified MachineInstr, or ~0U
368 /// when this function is not implemented by a target.
369 virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
370 return ~0U;
371 }
372
373 /// Return true if the instruction is as cheap as a move instruction.
374 ///
375 /// Targets for different archs need to override this, and different
376 /// micro-architectures can also be finely tuned inside.
377 virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
378 return MI.isAsCheapAsAMove();
379 }
380
381 /// Return true if the instruction should be sunk by MachineSink.
382 ///
383 /// MachineSink determines on its own whether the instruction is safe to sink;
384 /// this gives the target a hook to override the default behavior with regards
385 /// to which instructions should be sunk.
386 virtual bool shouldSink(const MachineInstr &MI) const { return true; }
387
388 /// Return false if the instruction should not be hoisted by MachineLICM.
389 ///
390 /// MachineLICM determines on its own whether the instruction is safe to
391 /// hoist; this gives the target a hook to extend this assessment and prevent
392 /// an instruction being hoisted from a given loop for target specific
393 /// reasons.
394 virtual bool shouldHoist(const MachineInstr &MI,
395 const MachineLoop *FromLoop) const {
396 return true;
397 }
398
399 /// Re-issue the specified 'original' instruction at the
400 /// specific location targeting a new destination register.
401 /// The register in Orig->getOperand(0).getReg() will be substituted by
402 /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
403 /// SubIdx.
404 virtual void reMaterialize(MachineBasicBlock &MBB,
406 unsigned SubIdx, const MachineInstr &Orig,
407 const TargetRegisterInfo &TRI) const;
408
409 /// Clones instruction or the whole instruction bundle \p Orig and
410 /// insert into \p MBB before \p InsertBefore. The target may update operands
411 /// that are required to be unique.
412 ///
413 /// \p Orig must not return true for MachineInstr::isNotDuplicable().
415 MachineBasicBlock::iterator InsertBefore,
416 const MachineInstr &Orig) const;
417
418 /// This method must be implemented by targets that
419 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
420 /// may be able to convert a two-address instruction into one or more true
421 /// three-address instructions on demand. This allows the X86 target (for
422 /// example) to convert ADD and SHL instructions into LEA instructions if they
423 /// would require register copies due to two-addressness.
424 ///
425 /// This method returns a null pointer if the transformation cannot be
426 /// performed, otherwise it returns the last new instruction.
427 ///
428 /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
429 /// replacing \p MI with new instructions, even though this function does not
430 /// remove MI.
432 LiveVariables *LV,
433 LiveIntervals *LIS) const {
434 return nullptr;
435 }
436
437 // This constant can be used as an input value of operand index passed to
438 // the method findCommutedOpIndices() to tell the method that the
439 // corresponding operand index is not pre-defined and that the method
440 // can pick any commutable operand.
441 static const unsigned CommuteAnyOperandIndex = ~0U;
442
443 /// This method commutes the operands of the given machine instruction MI.
444 ///
445 /// The operands to be commuted are specified by their indices OpIdx1 and
446 /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
447 /// 'CommuteAnyOperandIndex', which means that the method is free to choose
448 /// any arbitrarily chosen commutable operand. If both arguments are set to
449 /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
450 /// operands; then commutes them if such operands could be found.
451 ///
452 /// If NewMI is false, MI is modified in place and returned; otherwise, a
453 /// new machine instruction is created and returned.
454 ///
455 /// Do not call this method for a non-commutable instruction or
456 /// for non-commutable operands.
457 /// Even though the instruction is commutable, the method may still
458 /// fail to commute the operands, null pointer is returned in such cases.
460 commuteInstruction(MachineInstr &MI, bool NewMI = false,
461 unsigned OpIdx1 = CommuteAnyOperandIndex,
462 unsigned OpIdx2 = CommuteAnyOperandIndex) const;
463
464 /// Returns true iff the routine could find two commutable operands in the
465 /// given machine instruction.
466 /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
467 /// If any of the INPUT values is set to the special value
468 /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
469 /// operand, then returns its index in the corresponding argument.
470 /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method
471 /// looks for 2 commutable operands.
472 /// If INPUT values refer to some operands of MI, then the method simply
473 /// returns true if the corresponding operands are commutable and returns
474 /// false otherwise.
475 ///
476 /// For example, calling this method this way:
477 /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
478 /// findCommutedOpIndices(MI, Op1, Op2);
479 /// can be interpreted as a query asking to find an operand that would be
480 /// commutable with the operand#1.
481 virtual bool findCommutedOpIndices(const MachineInstr &MI,
482 unsigned &SrcOpIdx1,
483 unsigned &SrcOpIdx2) const;
484
485 /// Returns true if the target has a preference on the operands order of
486 /// the given machine instruction. And specify if \p Commute is required to
487 /// get the desired operands order.
488 virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
489 return false;
490 }
491
492 /// A pair composed of a register and a sub-register index.
493 /// Used to give some type checking when modeling Reg:SubReg.
496 unsigned SubReg;
497
499 : Reg(Reg), SubReg(SubReg) {}
500
501 bool operator==(const RegSubRegPair& P) const {
502 return Reg == P.Reg && SubReg == P.SubReg;
503 }
504 bool operator!=(const RegSubRegPair& P) const {
505 return !(*this == P);
506 }
507 };
508
509 /// A pair composed of a pair of a register and a sub-register index,
510 /// and another sub-register index.
511 /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
513 unsigned SubIdx;
514
516 unsigned SubIdx = 0)
518 };
519
520 /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
521 /// and \p DefIdx.
522 /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
523 /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
524 /// flag are not added to this list.
525 /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
526 /// two elements:
527 /// - %1:sub1, sub0
528 /// - %2<:0>, sub1
529 ///
530 /// \returns true if it is possible to build such an input sequence
531 /// with the pair \p MI, \p DefIdx. False otherwise.
532 ///
533 /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
534 ///
535 /// \note The generic implementation does not provide any support for
536 /// MI.isRegSequenceLike(). In other words, one has to override
537 /// getRegSequenceLikeInputs for target specific instructions.
538 bool
539 getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
540 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
541
542 /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
543 /// and \p DefIdx.
544 /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
545 /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
546 /// - %1:sub1, sub0
547 ///
548 /// \returns true if it is possible to build such an input sequence
549 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
550 /// False otherwise.
551 ///
552 /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
553 ///
554 /// \note The generic implementation does not provide any support for
555 /// MI.isExtractSubregLike(). In other words, one has to override
556 /// getExtractSubregLikeInputs for target specific instructions.
557 bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
558 RegSubRegPairAndIdx &InputReg) const;
559
560 /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
561 /// and \p DefIdx.
562 /// \p [out] BaseReg and \p [out] InsertedReg contain
563 /// the equivalent inputs of INSERT_SUBREG.
564 /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
565 /// - BaseReg: %0:sub0
566 /// - InsertedReg: %1:sub1, sub3
567 ///
568 /// \returns true if it is possible to build such an input sequence
569 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
570 /// False otherwise.
571 ///
572 /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
573 ///
574 /// \note The generic implementation does not provide any support for
575 /// MI.isInsertSubregLike(). In other words, one has to override
576 /// getInsertSubregLikeInputs for target specific instructions.
577 bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
578 RegSubRegPair &BaseReg,
579 RegSubRegPairAndIdx &InsertedReg) const;
580
581 /// Return true if two machine instructions would produce identical values.
582 /// By default, this is only true when the two instructions
583 /// are deemed identical except for defs. If this function is called when the
584 /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
585 /// aggressive checks.
586 virtual bool produceSameValue(const MachineInstr &MI0,
587 const MachineInstr &MI1,
588 const MachineRegisterInfo *MRI = nullptr) const;
589
590 /// \returns true if a branch from an instruction with opcode \p BranchOpc
591 /// bytes is capable of jumping to a position \p BrOffset bytes away.
592 virtual bool isBranchOffsetInRange(unsigned BranchOpc,
593 int64_t BrOffset) const {
594 llvm_unreachable("target did not implement");
595 }
596
597 /// \returns The block that branch instruction \p MI jumps to.
599 llvm_unreachable("target did not implement");
600 }
601
602 /// Insert an unconditional indirect branch at the end of \p MBB to \p
603 /// NewDestBB. Optionally, insert the clobbered register restoring in \p
604 /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
605 /// the offset of the position to insert the new branch.
607 MachineBasicBlock &NewDestBB,
608 MachineBasicBlock &RestoreBB,
609 const DebugLoc &DL, int64_t BrOffset = 0,
610 RegScavenger *RS = nullptr) const {
611 llvm_unreachable("target did not implement");
612 }
613
614 /// Analyze the branching code at the end of MBB, returning
615 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
616 /// implemented for a target). Upon success, this returns false and returns
617 /// with the following information in various cases:
618 ///
619 /// 1. If this block ends with no branches (it just falls through to its succ)
620 /// just return false, leaving TBB/FBB null.
621 /// 2. If this block ends with only an unconditional branch, it sets TBB to be
622 /// the destination block.
623 /// 3. If this block ends with a conditional branch and it falls through to a
624 /// successor block, it sets TBB to be the branch destination block and a
625 /// list of operands that evaluate the condition. These operands can be
626 /// passed to other TargetInstrInfo methods to create new branches.
627 /// 4. If this block ends with a conditional branch followed by an
628 /// unconditional branch, it returns the 'true' destination in TBB, the
629 /// 'false' destination in FBB, and a list of operands that evaluate the
630 /// condition. These operands can be passed to other TargetInstrInfo
631 /// methods to create new branches.
632 ///
633 /// Note that removeBranch and insertBranch must be implemented to support
634 /// cases where this method returns success.
635 ///
636 /// If AllowModify is true, then this routine is allowed to modify the basic
637 /// block (e.g. delete instructions after the unconditional branch).
638 ///
639 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
640 /// before calling this function.
642 MachineBasicBlock *&FBB,
644 bool AllowModify = false) const {
645 return true;
646 }
647
648 /// Represents a predicate at the MachineFunction level. The control flow a
649 /// MachineBranchPredicate represents is:
650 ///
651 /// Reg = LHS `Predicate` RHS == ConditionDef
652 /// if Reg then goto TrueDest else goto FalseDest
653 ///
656 PRED_EQ, // True if two values are equal
657 PRED_NE, // True if two values are not equal
658 PRED_INVALID // Sentinel value
659 };
660
667
668 /// SingleUseCondition is true if ConditionDef is dead except for the
669 /// branch(es) at the end of the basic block.
670 ///
671 bool SingleUseCondition = false;
672
673 explicit MachineBranchPredicate() = default;
674 };
675
676 /// Analyze the branching code at the end of MBB and parse it into the
677 /// MachineBranchPredicate structure if possible. Returns false on success
678 /// and true on failure.
679 ///
680 /// If AllowModify is true, then this routine is allowed to modify the basic
681 /// block (e.g. delete instructions after the unconditional branch).
682 ///
685 bool AllowModify = false) const {
686 return true;
687 }
688
689 /// Remove the branching code at the end of the specific MBB.
690 /// This is only invoked in cases where analyzeBranch returns success. It
691 /// returns the number of instructions that were removed.
692 /// If \p BytesRemoved is non-null, report the change in code size from the
693 /// removed instructions.
695 int *BytesRemoved = nullptr) const {
696 llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
697 }
698
699 /// Insert branch code into the end of the specified MachineBasicBlock. The
700 /// operands to this method are the same as those returned by analyzeBranch.
701 /// This is only invoked in cases where analyzeBranch returns success. It
702 /// returns the number of instructions inserted. If \p BytesAdded is non-null,
703 /// report the change in code size from the added instructions.
704 ///
705 /// It is also invoked by tail merging to add unconditional branches in
706 /// cases where analyzeBranch doesn't apply because there was no original
707 /// branch to analyze. At least this much must be implemented, else tail
708 /// merging needs to be disabled.
709 ///
710 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
711 /// before calling this function.
715 const DebugLoc &DL,
716 int *BytesAdded = nullptr) const {
717 llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
718 }
719
721 MachineBasicBlock *DestBB,
722 const DebugLoc &DL,
723 int *BytesAdded = nullptr) const {
724 return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
725 BytesAdded);
726 }
727
728 /// Object returned by analyzeLoopForPipelining. Allows software pipelining
729 /// implementations to query attributes of the loop being pipelined and to
730 /// apply target-specific updates to the loop once pipelining is complete.
732 public:
734 /// Return true if the given instruction should not be pipelined and should
735 /// be ignored. An example could be a loop comparison, or induction variable
736 /// update with no users being pipelined.
737 virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
738
739 /// Return true if the proposed schedule should be used. Otherwise return
740 /// false to not pipeline the loop. This function should be used to ensure
741 /// that pipelined loops meet target-specific quality heuristics.
743 return true;
744 }
745
746 /// Create a condition to determine if the trip count of the loop is greater
747 /// than TC, where TC is always one more than for the previous prologue or
748 /// 0 if this is being called for the outermost prologue.
749 ///
750 /// If the trip count is statically known to be greater than TC, return
751 /// true. If the trip count is statically known to be not greater than TC,
752 /// return false. Otherwise return nullopt and fill out Cond with the test
753 /// condition.
754 ///
755 /// Note: This hook is guaranteed to be called from the innermost to the
756 /// outermost prologue of the loop being software pipelined.
757 virtual std::optional<bool>
760
761 /// Modify the loop such that the trip count is
762 /// OriginalTC + TripCountAdjust.
763 virtual void adjustTripCount(int TripCountAdjust) = 0;
764
765 /// Called when the loop's preheader has been modified to NewPreheader.
766 virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
767
768 /// Called when the loop is being removed. Any instructions in the preheader
769 /// should be removed.
770 ///
771 /// Once this function is called, no other functions on this object are
772 /// valid; the loop has been removed.
773 virtual void disposed() = 0;
774 };
775
776 /// Analyze loop L, which must be a single-basic-block loop, and if the
777 /// conditions can be understood enough produce a PipelinerLoopInfo object.
778 virtual std::unique_ptr<PipelinerLoopInfo>
780 return nullptr;
781 }
782
783 /// Analyze the loop code, return true if it cannot be understood. Upon
784 /// success, this function returns false and returns information about the
785 /// induction variable and compare instruction used at the end.
786 virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
787 MachineInstr *&CmpInst) const {
788 return true;
789 }
790
791 /// Generate code to reduce the loop iteration by one and check if the loop
792 /// is finished. Return the value/register of the new loop count. We need
793 /// this function when peeling off one or more iterations of a loop. This
794 /// function assumes the nth iteration is peeled first.
796 MachineBasicBlock &PreHeader,
797 MachineInstr *IndVar, MachineInstr &Cmp,
800 unsigned Iter, unsigned MaxIter) const {
801 llvm_unreachable("Target didn't implement ReduceLoopCount");
802 }
803
804 /// Delete the instruction OldInst and everything after it, replacing it with
805 /// an unconditional branch to NewDest. This is used by the tail merging pass.
807 MachineBasicBlock *NewDest) const;
808
809 /// Return true if it's legal to split the given basic
810 /// block at the specified instruction (i.e. instruction would be the start
811 /// of a new basic block).
814 return true;
815 }
816
817 /// Return true if it's profitable to predicate
818 /// instructions with accumulated instruction latency of "NumCycles"
819 /// of the specified basic block, where the probability of the instructions
820 /// being executed is given by Probability, and Confidence is a measure
821 /// of our confidence that it will be properly predicted.
822 virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
823 unsigned ExtraPredCycles,
824 BranchProbability Probability) const {
825 return false;
826 }
827
828 /// Second variant of isProfitableToIfCvt. This one
829 /// checks for the case where two basic blocks from true and false path
830 /// of a if-then-else (diamond) are predicated on mutually exclusive
831 /// predicates, where the probability of the true path being taken is given
832 /// by Probability, and Confidence is a measure of our confidence that it
833 /// will be properly predicted.
834 virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
835 unsigned ExtraTCycles,
836 MachineBasicBlock &FMBB, unsigned NumFCycles,
837 unsigned ExtraFCycles,
838 BranchProbability Probability) const {
839 return false;
840 }
841
842 /// Return true if it's profitable for if-converter to duplicate instructions
843 /// of specified accumulated instruction latencies in the specified MBB to
844 /// enable if-conversion.
845 /// The probability of the instructions being executed is given by
846 /// Probability, and Confidence is a measure of our confidence that it
847 /// will be properly predicted.
849 unsigned NumCycles,
850 BranchProbability Probability) const {
851 return false;
852 }
853
854 /// Return the increase in code size needed to predicate a contiguous run of
855 /// NumInsts instructions.
857 unsigned NumInsts) const {
858 return 0;
859 }
860
861 /// Return an estimate for the code size reduction (in bytes) which will be
862 /// caused by removing the given branch instruction during if-conversion.
863 virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
864 return getInstSizeInBytes(MI);
865 }
866
867 /// Return true if it's profitable to unpredicate
868 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
869 /// exclusive predicates.
870 /// e.g.
871 /// subeq r0, r1, #1
872 /// addne r0, r1, #1
873 /// =>
874 /// sub r0, r1, #1
875 /// addne r0, r1, #1
876 ///
877 /// This may be profitable if conditional instructions are always executed.
879 MachineBasicBlock &FMBB) const {
880 return false;
881 }
882
883 /// Return true if it is possible to insert a select
884 /// instruction that chooses between TrueReg and FalseReg based on the
885 /// condition code in Cond.
886 ///
887 /// When successful, also return the latency in cycles from TrueReg,
888 /// FalseReg, and Cond to the destination register. In most cases, a select
889 /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
890 ///
891 /// Some x86 implementations have 2-cycle cmov instructions.
892 ///
893 /// @param MBB Block where select instruction would be inserted.
894 /// @param Cond Condition returned by analyzeBranch.
895 /// @param DstReg Virtual dest register that the result should write to.
896 /// @param TrueReg Virtual register to select when Cond is true.
897 /// @param FalseReg Virtual register to select when Cond is false.
898 /// @param CondCycles Latency from Cond+Branch to select output.
899 /// @param TrueCycles Latency from TrueReg to select output.
900 /// @param FalseCycles Latency from FalseReg to select output.
903 Register TrueReg, Register FalseReg,
904 int &CondCycles, int &TrueCycles,
905 int &FalseCycles) const {
906 return false;
907 }
908
909 /// Insert a select instruction into MBB before I that will copy TrueReg to
910 /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
911 ///
912 /// This function can only be called after canInsertSelect() returned true.
913 /// The condition in Cond comes from analyzeBranch, and it can be assumed
914 /// that the same flags or registers required by Cond are available at the
915 /// insertion point.
916 ///
917 /// @param MBB Block where select instruction should be inserted.
918 /// @param I Insertion point.
919 /// @param DL Source location for debugging.
920 /// @param DstReg Virtual register to be defined by select instruction.
921 /// @param Cond Condition as computed by analyzeBranch.
922 /// @param TrueReg Virtual register to copy when Cond is true.
923 /// @param FalseReg Virtual register to copy when Cond is false.
927 Register TrueReg, Register FalseReg) const {
928 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
929 }
930
931 /// Analyze the given select instruction, returning true if
932 /// it cannot be understood. It is assumed that MI->isSelect() is true.
933 ///
934 /// When successful, return the controlling condition and the operands that
935 /// determine the true and false result values.
936 ///
937 /// Result = SELECT Cond, TrueOp, FalseOp
938 ///
939 /// Some targets can optimize select instructions, for example by predicating
940 /// the instruction defining one of the operands. Such targets should set
941 /// Optimizable.
942 ///
943 /// @param MI Select instruction to analyze.
944 /// @param Cond Condition controlling the select.
945 /// @param TrueOp Operand number of the value selected when Cond is true.
946 /// @param FalseOp Operand number of the value selected when Cond is false.
947 /// @param Optimizable Returned as true if MI is optimizable.
948 /// @returns False on success.
949 virtual bool analyzeSelect(const MachineInstr &MI,
951 unsigned &TrueOp, unsigned &FalseOp,
952 bool &Optimizable) const {
953 assert(MI.getDesc().isSelect() && "MI must be a select instruction");
954 return true;
955 }
956
957 /// Given a select instruction that was understood by
958 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
959 /// merging it with one of its operands. Returns NULL on failure.
960 ///
961 /// When successful, returns the new select instruction. The client is
962 /// responsible for deleting MI.
963 ///
964 /// If both sides of the select can be optimized, PreferFalse is used to pick
965 /// a side.
966 ///
967 /// @param MI Optimizable select instruction.
968 /// @param NewMIs Set that record all MIs in the basic block up to \p
969 /// MI. Has to be updated with any newly created MI or deleted ones.
970 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
971 /// @returns Optimized instruction or NULL.
974 bool PreferFalse = false) const {
975 // This function must be implemented if Optimizable is ever set.
976 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
977 }
978
979 /// Emit instructions to copy a pair of physical registers.
980 ///
981 /// This function should support copies within any legal register class as
982 /// well as any cross-class copies created during instruction selection.
983 ///
984 /// The source and destination registers may overlap, which may require a
985 /// careful implementation when multiple copy instructions are required for
986 /// large registers. See for example the ARM target.
989 MCRegister DestReg, MCRegister SrcReg,
990 bool KillSrc) const {
991 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
992 }
993
994 /// Allow targets to tell MachineVerifier whether a specific register
995 /// MachineOperand can be used as part of PC-relative addressing.
996 /// PC-relative addressing modes in many CISC architectures contain
997 /// (non-PC) registers as offsets or scaling values, which inherently
998 /// tags the corresponding MachineOperand with OPERAND_PCREL.
999 ///
1000 /// @param MO The MachineOperand in question. MO.isReg() should always
1001 /// be true.
1002 /// @return Whether this operand is allowed to be used PC-relatively.
1003 virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
1004 return false;
1005 }
1006
1007protected:
1008 /// Target-dependent implementation for IsCopyInstr.
1009 /// If the specific machine instruction is a instruction that moves/copies
1010 /// value from one register to another register return destination and source
1011 /// registers as machine operands.
1012 virtual std::optional<DestSourcePair>
1014 return std::nullopt;
1015 }
1016
1017 /// Return true if the given terminator MI is not expected to spill. This
1018 /// sets the live interval as not spillable and adjusts phi node lowering to
1019 /// not introduce copies after the terminator. Use with care, these are
1020 /// currently used for hardware loop intrinsics in very controlled situations,
1021 /// created prior to registry allocation in loops that only have single phi
1022 /// users for the terminators value. They may run out of registers if not used
1023 /// carefully.
1024 virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
1025 return false;
1026 }
1027
1028public:
1029 /// If the specific machine instruction is a instruction that moves/copies
1030 /// value from one register to another register return destination and source
1031 /// registers as machine operands.
1032 /// For COPY-instruction the method naturally returns destination and source
1033 /// registers as machine operands, for all other instructions the method calls
1034 /// target-dependent implementation.
1035 std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
1036 if (MI.isCopy()) {
1037 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1038 }
1039 return isCopyInstrImpl(MI);
1040 }
1041
1042 /// If the specific machine instruction is an instruction that adds an
1043 /// immediate value and a physical register, and stores the result in
1044 /// the given physical register \c Reg, return a pair of the source
1045 /// register and the offset which has been added.
1046 virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
1047 Register Reg) const {
1048 return std::nullopt;
1049 }
1050
1051 /// Returns true if MI is an instruction that defines Reg to have a constant
1052 /// value and the value is recorded in ImmVal. The ImmVal is a result that
1053 /// should be interpreted as modulo size of Reg.
1055 const Register Reg,
1056 int64_t &ImmVal) const {
1057 return false;
1058 }
1059
1060 /// Store the specified register of the given register class to the specified
1061 /// stack frame index. The store instruction is to be added to the given
1062 /// machine basic block before the specified machine instruction. If isKill
1063 /// is true, the register operand is the last use and must be marked kill. If
1064 /// \p SrcReg is being directly spilled as part of assigning a virtual
1065 /// register, \p VReg is the register being assigned. This additional register
1066 /// argument is needed for certain targets when invoked from RegAllocFast to
1067 /// map the spilled physical register to its virtual register. A null register
1068 /// can be passed elsewhere.
1071 Register SrcReg, bool isKill, int FrameIndex,
1072 const TargetRegisterClass *RC,
1073 const TargetRegisterInfo *TRI,
1074 Register VReg) const {
1075 llvm_unreachable("Target didn't implement "
1076 "TargetInstrInfo::storeRegToStackSlot!");
1077 }
1078
1079 /// Load the specified register of the given register class from the specified
1080 /// stack frame index. The load instruction is to be added to the given
1081 /// machine basic block before the specified machine instruction. If \p
1082 /// DestReg is being directly reloaded as part of assigning a virtual
1083 /// register, \p VReg is the register being assigned. This additional register
1084 /// argument is needed for certain targets when invoked from RegAllocFast to
1085 /// map the loaded physical register to its virtual register. A null register
1086 /// can be passed elsewhere.
1089 Register DestReg, int FrameIndex,
1090 const TargetRegisterClass *RC,
1091 const TargetRegisterInfo *TRI,
1092 Register VReg) const {
1093 llvm_unreachable("Target didn't implement "
1094 "TargetInstrInfo::loadRegFromStackSlot!");
1095 }
1096
1097 /// This function is called for all pseudo instructions
1098 /// that remain after register allocation. Many pseudo instructions are
1099 /// created to help register allocation. This is the place to convert them
1100 /// into real instructions. The target can edit MI in place, or it can insert
1101 /// new instructions and erase MI. The function should return true if
1102 /// anything was changed.
1103 virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
1104
1105 /// Check whether the target can fold a load that feeds a subreg operand
1106 /// (or a subreg operand that feeds a store).
1107 /// For example, X86 may want to return true if it can fold
1108 /// movl (%esp), %eax
1109 /// subb, %al, ...
1110 /// Into:
1111 /// subb (%esp), ...
1112 ///
1113 /// Ideally, we'd like the target implementation of foldMemoryOperand() to
1114 /// reject subregs - but since this behavior used to be enforced in the
1115 /// target-independent code, moving this responsibility to the targets
1116 /// has the potential of causing nasty silent breakage in out-of-tree targets.
1117 virtual bool isSubregFoldable() const { return false; }
1118
1119 /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
1120 /// operands which can't be folded into stack references. Operands outside
1121 /// of the range are most likely foldable but it is not guaranteed.
1122 /// These instructions are unique in that stack references for some operands
1123 /// have the same execution cost (e.g. none) as the unfolded register forms.
1124 /// The ranged return is guaranteed to include all operands which can't be
1125 /// folded at zero cost.
1126 virtual std::pair<unsigned, unsigned>
1128
1129 /// Attempt to fold a load or store of the specified stack
1130 /// slot into the specified machine instruction for the specified operand(s).
1131 /// If this is possible, a new instruction is returned with the specified
1132 /// operand folded, otherwise NULL is returned.
1133 /// The new instruction is inserted before MI, and the client is responsible
1134 /// for removing the old instruction.
1135 /// If VRM is passed, the assigned physregs can be inspected by target to
1136 /// decide on using an opcode (note that those assignments can still change).
1138 int FI,
1139 LiveIntervals *LIS = nullptr,
1140 VirtRegMap *VRM = nullptr) const;
1141
1142 /// Same as the previous version except it allows folding of any load and
1143 /// store from / to any address, not just from a specific stack slot.
1145 MachineInstr &LoadMI,
1146 LiveIntervals *LIS = nullptr) const;
1147
1148 /// Return true when there is potentially a faster code sequence
1149 /// for an instruction chain ending in \p Root. All potential patterns are
1150 /// returned in the \p Pattern vector. Pattern should be sorted in priority
1151 /// order since the pattern evaluator stops checking as soon as it finds a
1152 /// faster sequence.
1153 /// \param Root - Instruction that could be combined with one of its operands
1154 /// \param Patterns - Vector of possible combination patterns
1155 virtual bool
1158 bool DoRegPressureReduce) const;
1159
1160 /// Return true if target supports reassociation of instructions in machine
1161 /// combiner pass to reduce register pressure for a given BB.
1162 virtual bool
1164 const RegisterClassInfo *RegClassInfo) const {
1165 return false;
1166 }
1167
1168 /// Fix up the placeholder we may add in genAlternativeCodeSequence().
1169 virtual void
1171 SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
1172
1173 /// Return true when a code sequence can improve throughput. It
1174 /// should be called only for instructions in loops.
1175 /// \param Pattern - combiner pattern
1177
1178 /// Return true if the input \P Inst is part of a chain of dependent ops
1179 /// that are suitable for reassociation, otherwise return false.
1180 /// If the instruction's operands must be commuted to have a previous
1181 /// instruction of the same type define the first source operand, \P Commuted
1182 /// will be set to true.
1183 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
1184
1185 /// Return true when \P Inst is both associative and commutative. If \P Invert
1186 /// is true, then the inverse of \P Inst operation must be tested.
1188 bool Invert = false) const {
1189 return false;
1190 }
1191
1192 /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add
1193 /// for sub and vice versa).
1194 virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
1195 return std::nullopt;
1196 }
1197
1198 /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
1199 bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;
1200
1201 /// Return true when \P Inst has reassociable operands in the same \P MBB.
1202 virtual bool hasReassociableOperands(const MachineInstr &Inst,
1203 const MachineBasicBlock *MBB) const;
1204
1205 /// Return true when \P Inst has reassociable sibling.
1206 virtual bool hasReassociableSibling(const MachineInstr &Inst,
1207 bool &Commuted) const;
1208
1209 /// When getMachineCombinerPatterns() finds patterns, this function generates
1210 /// the instructions that could replace the original code sequence. The client
1211 /// has to decide whether the actual replacement is beneficial or not.
1212 /// \param Root - Instruction that could be combined with one of its operands
1213 /// \param Pattern - Combination pattern for Root
1214 /// \param InsInstrs - Vector of new instructions that implement P
1215 /// \param DelInstrs - Old instructions, including Root, that could be
1216 /// replaced by InsInstr
1217 /// \param InstIdxForVirtReg - map of virtual register to instruction in
1218 /// InsInstr that defines it
1219 virtual void genAlternativeCodeSequence(
1223 DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
1224
1225 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
1226 /// reduce critical path length.
1227 void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
1231 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
1232
1233 /// Reassociation of some instructions requires inverse operations (e.g.
1234 /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
1235 /// (new root opcode, new prev opcode) that must be used to reassociate \P
1236 /// Root and \P Prev accoring to \P Pattern.
1237 std::pair<unsigned, unsigned>
1239 const MachineInstr &Root,
1240 const MachineInstr &Prev) const;
1241
1242 /// The limit on resource length extension we accept in MachineCombiner Pass.
1243 virtual int getExtendResourceLenLimit() const { return 0; }
1244
1245 /// This is an architecture-specific helper function of reassociateOps.
1246 /// Set special operand attributes for new instructions after reassociation.
1247 virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
1248 MachineInstr &NewMI1,
1249 MachineInstr &NewMI2) const {}
1250
1251 /// Return true when a target supports MachineCombiner.
1252 virtual bool useMachineCombiner() const { return false; }
1253
1254 /// Return true if the given SDNode can be copied during scheduling
1255 /// even if it has glue.
1256 virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1257
1258protected:
1259 /// Target-dependent implementation for foldMemoryOperand.
1260 /// Target-independent code in foldMemoryOperand will
1261 /// take care of adding a MachineMemOperand to the newly created instruction.
1262 /// The instruction and any auxiliary instructions necessary will be inserted
1263 /// at InsertPt.
1264 virtual MachineInstr *
1267 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1268 LiveIntervals *LIS = nullptr,
1269 VirtRegMap *VRM = nullptr) const {
1270 return nullptr;
1271 }
1272
1273 /// Target-dependent implementation for foldMemoryOperand.
1274 /// Target-independent code in foldMemoryOperand will
1275 /// take care of adding a MachineMemOperand to the newly created instruction.
1276 /// The instruction and any auxiliary instructions necessary will be inserted
1277 /// at InsertPt.
1280 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1281 LiveIntervals *LIS = nullptr) const {
1282 return nullptr;
1283 }
1284
1285 /// Target-dependent implementation of getRegSequenceInputs.
1286 ///
1287 /// \returns true if it is possible to build the equivalent
1288 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1289 ///
1290 /// \pre MI.isRegSequenceLike().
1291 ///
1292 /// \see TargetInstrInfo::getRegSequenceInputs.
1294 const MachineInstr &MI, unsigned DefIdx,
1295 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1296 return false;
1297 }
1298
1299 /// Target-dependent implementation of getExtractSubregInputs.
1300 ///
1301 /// \returns true if it is possible to build the equivalent
1302 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1303 ///
1304 /// \pre MI.isExtractSubregLike().
1305 ///
1306 /// \see TargetInstrInfo::getExtractSubregInputs.
1308 unsigned DefIdx,
1309 RegSubRegPairAndIdx &InputReg) const {
1310 return false;
1311 }
1312
1313 /// Target-dependent implementation of getInsertSubregInputs.
1314 ///
1315 /// \returns true if it is possible to build the equivalent
1316 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1317 ///
1318 /// \pre MI.isInsertSubregLike().
1319 ///
1320 /// \see TargetInstrInfo::getInsertSubregInputs.
1321 virtual bool
1323 RegSubRegPair &BaseReg,
1324 RegSubRegPairAndIdx &InsertedReg) const {
1325 return false;
1326 }
1327
1328public:
1329 /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1330 /// a store or a load and a store into two or more instruction. If this is
1331 /// possible, returns true as well as the new instructions by reference.
1332 virtual bool
1334 bool UnfoldLoad, bool UnfoldStore,
1335 SmallVectorImpl<MachineInstr *> &NewMIs) const {
1336 return false;
1337 }
1338
1340 SmallVectorImpl<SDNode *> &NewNodes) const {
1341 return false;
1342 }
1343
1344 /// Returns the opcode of the would be new
1345 /// instruction after load / store are unfolded from an instruction of the
1346 /// specified opcode. It returns zero if the specified unfolding is not
1347 /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1348 /// index of the operand which will hold the register holding the loaded
1349 /// value.
1350 virtual unsigned
1351 getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1352 unsigned *LoadRegIndex = nullptr) const {
1353 return 0;
1354 }
1355
1356 /// This is used by the pre-regalloc scheduler to determine if two loads are
1357 /// loading from the same base address. It should only return true if the base
1358 /// pointers are the same and the only differences between the two addresses
1359 /// are the offset. It also returns the offsets by reference.
1360 virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1361 int64_t &Offset1,
1362 int64_t &Offset2) const {
1363 return false;
1364 }
1365
1366 /// This is a used by the pre-regalloc scheduler to determine (in conjunction
1367 /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
1368 /// On some targets if two loads are loading from
1369 /// addresses in the same cache line, it's better if they are scheduled
1370 /// together. This function takes two integers that represent the load offsets
1371 /// from the common base address. It returns true if it decides it's desirable
1372 /// to schedule the two loads together. "NumLoads" is the number of loads that
1373 /// have already been scheduled after Load1.
1374 virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1375 int64_t Offset1, int64_t Offset2,
1376 unsigned NumLoads) const {
1377 return false;
1378 }
1379
1380 /// Get the base operand and byte offset of an instruction that reads/writes
1381 /// memory. This is a convenience function for callers that are only prepared
1382 /// to handle a single base operand.
1384 const MachineOperand *&BaseOp, int64_t &Offset,
1385 bool &OffsetIsScalable,
1386 const TargetRegisterInfo *TRI) const;
1387
1388 /// Get zero or more base operands and the byte offset of an instruction that
1389 /// reads/writes memory. Note that there may be zero base operands if the
1390 /// instruction accesses a constant address.
1391 /// It returns false if MI does not read/write memory.
1392 /// It returns false if base operands and offset could not be determined.
1393 /// It is not guaranteed to always recognize base operands and offsets in all
1394 /// cases.
1397 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
1398 const TargetRegisterInfo *TRI) const {
1399 return false;
1400 }
1401
1402 /// Return true if the instruction contains a base register and offset. If
1403 /// true, the function also sets the operand position in the instruction
1404 /// for the base register and offset.
1406 unsigned &BasePos,
1407 unsigned &OffsetPos) const {
1408 return false;
1409 }
1410
1411 /// Target dependent implementation to get the values constituting the address
1412 /// MachineInstr that is accessing memory. These values are returned as a
1413 /// struct ExtAddrMode which contains all relevant information to make up the
1414 /// address.
1415 virtual std::optional<ExtAddrMode>
1417 const TargetRegisterInfo *TRI) const {
1418 return std::nullopt;
1419 }
1420
1421 /// Returns true if MI's Def is NullValueReg, and the MI
1422 /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
1423 /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
1424 /// function can return true even if becomes zero. Specifically cases such as
1425 /// NullValueReg = shl NullValueReg, 63.
1427 const Register NullValueReg,
1428 const TargetRegisterInfo *TRI) const {
1429 return false;
1430 }
1431
1432 /// If the instruction is an increment of a constant value, return the amount.
1433 virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
1434 return false;
1435 }
1436
1437 /// Returns true if the two given memory operations should be scheduled
1438 /// adjacent. Note that you have to add:
1439 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1440 /// or
1441 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1442 /// to TargetPassConfig::createMachineScheduler() to have an effect.
1443 ///
1444 /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
1445 /// \p NumLoads is the number of loads that will be in the cluster if this
1446 /// hook returns true.
1447 /// \p NumBytes is the number of bytes that will be loaded from all the
1448 /// clustered loads if this hook returns true.
1451 unsigned NumLoads, unsigned NumBytes) const {
1452 llvm_unreachable("target did not implement shouldClusterMemOps()");
1453 }
1454
1455 /// Reverses the branch condition of the specified condition list,
1456 /// returning false on success and true if it cannot be reversed.
1457 virtual bool
1459 return true;
1460 }
1461
1462 /// Insert a noop into the instruction stream at the specified point.
1463 virtual void insertNoop(MachineBasicBlock &MBB,
1465
1466 /// Insert noops into the instruction stream at the specified point.
1467 virtual void insertNoops(MachineBasicBlock &MBB,
1469 unsigned Quantity) const;
1470
1471 /// Return the noop instruction to use for a noop.
1472 virtual MCInst getNop() const;
1473
1474 /// Return true for post-incremented instructions.
1475 virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1476
1477 /// Returns true if the instruction is already predicated.
1478 virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1479
1480 /// Assumes the instruction is already predicated and returns true if the
1481 /// instruction can be predicated again.
1482 virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
1483 assert(isPredicated(MI) && "Instruction is not predicated");
1484 return false;
1485 }
1486
1487 // Returns a MIRPrinter comment for this machine operand.
1488 virtual std::string
1490 unsigned OpIdx, const TargetRegisterInfo *TRI) const;
1491
1492 /// Returns true if the instruction is a
1493 /// terminator instruction that has not been predicated.
1494 bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1495
1496 /// Returns true if MI is an unconditional tail call.
1497 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
1498 return false;
1499 }
1500
1501 /// Returns true if the tail call can be made conditional on BranchCond.
1503 const MachineInstr &TailCall) const {
1504 return false;
1505 }
1506
1507 /// Replace the conditional branch in MBB with a conditional tail call.
1510 const MachineInstr &TailCall) const {
1511 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1512 }
1513
1514 /// Convert the instruction into a predicated instruction.
1515 /// It returns true if the operation was successful.
1516 virtual bool PredicateInstruction(MachineInstr &MI,
1517 ArrayRef<MachineOperand> Pred) const;
1518
1519 /// Returns true if the first specified predicate
1520 /// subsumes the second, e.g. GE subsumes GT.
1522 ArrayRef<MachineOperand> Pred2) const {
1523 return false;
1524 }
1525
1526 /// If the specified instruction defines any predicate
1527 /// or condition code register(s) used for predication, returns true as well
1528 /// as the definition predicate(s) by reference.
1529 /// SkipDead should be set to false at any point that dead
1530 /// predicate instructions should be considered as being defined.
1531 /// A dead predicate instruction is one that is guaranteed to be removed
1532 /// after a call to PredicateInstruction.
1534 std::vector<MachineOperand> &Pred,
1535 bool SkipDead) const {
1536 return false;
1537 }
1538
1539 /// Return true if the specified instruction can be predicated.
1540 /// By default, this returns true for every instruction with a
1541 /// PredicateOperand.
1542 virtual bool isPredicable(const MachineInstr &MI) const {
1543 return MI.getDesc().isPredicable();
1544 }
1545
1546 /// Return true if it's safe to move a machine
1547 /// instruction that defines the specified register class.
1548 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1549 return true;
1550 }
1551
1552 /// Test if the given instruction should be considered a scheduling boundary.
1553 /// This primarily includes labels and terminators.
1554 virtual bool isSchedulingBoundary(const MachineInstr &MI,
1555 const MachineBasicBlock *MBB,
1556 const MachineFunction &MF) const;
1557
1558 /// Measure the specified inline asm to determine an approximation of its
1559 /// length.
1560 virtual unsigned getInlineAsmLength(
1561 const char *Str, const MCAsmInfo &MAI,
1562 const TargetSubtargetInfo *STI = nullptr) const;
1563
1564 /// Allocate and return a hazard recognizer to use for this target when
1565 /// scheduling the machine instructions before register allocation.
1566 virtual ScheduleHazardRecognizer *
1568 const ScheduleDAG *DAG) const;
1569
1570 /// Allocate and return a hazard recognizer to use for this target when
1571 /// scheduling the machine instructions before register allocation.
1572 virtual ScheduleHazardRecognizer *
1574 const ScheduleDAGMI *DAG) const;
1575
1576 /// Allocate and return a hazard recognizer to use for this target when
1577 /// scheduling the machine instructions after register allocation.
1578 virtual ScheduleHazardRecognizer *
1580 const ScheduleDAG *DAG) const;
1581
1582 /// Allocate and return a hazard recognizer to use by non-scheduling
1583 /// passes.
1584 virtual ScheduleHazardRecognizer *
1586 return nullptr;
1587 }
1588
1589 /// Provide a global flag for disabling the PreRA hazard recognizer that
1590 /// targets may choose to honor.
1591 bool usePreRAHazardRecognizer() const;
1592
1593 /// For a comparison instruction, return the source registers
1594 /// in SrcReg and SrcReg2 if having two register operands, and the value it
1595 /// compares against in CmpValue. Return true if the comparison instruction
1596 /// can be analyzed.
1597 virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1598 Register &SrcReg2, int64_t &Mask,
1599 int64_t &Value) const {
// Default: the target provides no compare analysis; out-params are untouched.
1600 return false;
1601 }
1602
1603 /// See if the comparison instruction can be converted
1604 /// into something more efficient. E.g., on ARM most instructions can set the
1605 /// flags register, obviating the need for a separate CMP.
1606 virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
1607 Register SrcReg2, int64_t Mask,
1608 int64_t Value,
1609 const MachineRegisterInfo *MRI) const {
// Default: no target-specific compare optimization is performed.
1610 return false;
1611 }
/// Target hook to simplify a conditional branch. The default implementation
/// makes no change and returns false.
1612 virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1613
1614 /// Try to remove the load by folding it to a register operand at the use.
1615 /// We fold the load instructions if and only if the
1616 /// def and use are in the same BB. We only look at one load and see
1617 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1618 /// defined by the load we are trying to fold. DefMI returns the machine
1619 /// instruction that defines FoldAsLoadDefReg, and the function returns
1620 /// the machine instruction generated due to folding.
1622 const MachineRegisterInfo *MRI,
1623 Register &FoldAsLoadDefReg,
1624 MachineInstr *&DefMI) const {
1625 return nullptr;
1626 }
1627
1628 /// 'Reg' is known to be defined by a move immediate instruction,
1629 /// try to fold the immediate into the use instruction.
1630 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1631 /// then the caller may assume that DefMI has been erased from its parent
1632 /// block. The caller may assume that it will not be erased by this
1633 /// function otherwise.
1636 return false;
1637 }
1638
1639 /// Return the number of u-operations the given machine
1640 /// instruction will be decoded to on the target cpu. The itinerary's
1641 /// IssueWidth is the number of microops that can be dispatched each
1642 /// cycle. An instruction with zero microops takes no dispatch resources.
1643 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1644 const MachineInstr &MI) const;
1645
1646 /// Return true for pseudo instructions that don't consume any
1647 /// machine resources in their current form. These are common cases that the
1648 /// scheduler should consider free, rather than conservatively handling them
1649 /// as instructions with no itinerary.
1650 bool isZeroCost(unsigned Opcode) const {
1651 return Opcode <= TargetOpcode::COPY;
1652 }
1653
1654 virtual int getOperandLatency(const InstrItineraryData *ItinData,
1655 SDNode *DefNode, unsigned DefIdx,
1656 SDNode *UseNode, unsigned UseIdx) const;
1657
1658 /// Compute and return the use operand latency of a given pair of def and use.
1659 /// In most cases, the static scheduling itinerary was enough to determine the
1660 /// operand latency. But it may not be possible for instructions with variable
1661 /// number of defs / uses.
1662 ///
1663 /// This is a raw interface to the itinerary that may be directly overridden
1664 /// by a target. Use computeOperandLatency to get the best estimate of
1665 /// latency.
1666 virtual int getOperandLatency(const InstrItineraryData *ItinData,
1667 const MachineInstr &DefMI, unsigned DefIdx,
1668 const MachineInstr &UseMI,
1669 unsigned UseIdx) const;
1670
1671 /// Compute the instruction latency of a given instruction.
1672 /// If the instruction has higher cost when predicated, it's returned via
1673 /// PredCost.
1674 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1675 const MachineInstr &MI,
1676 unsigned *PredCost = nullptr) const;
1677
1678 virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1679
1680 virtual int getInstrLatency(const InstrItineraryData *ItinData,
1681 SDNode *Node) const;
1682
1683 /// Return the default expected latency for a def based on its opcode.
1684 unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1685 const MachineInstr &DefMI) const;
1686
1687 /// Return true if this opcode has high latency to its result.
1688 virtual bool isHighLatencyDef(int opc) const { return false; }
1689
1690 /// Compute operand latency between a def of 'Reg'
1691 /// and a use in the current loop. Return true if the target considered
1692 /// it 'high'. This is used by optimization passes such as machine LICM to
1693 /// determine whether it makes sense to hoist an instruction out even in a
1694 /// high register pressure situation.
1695 virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1696 const MachineRegisterInfo *MRI,
1697 const MachineInstr &DefMI, unsigned DefIdx,
1698 const MachineInstr &UseMI,
1699 unsigned UseIdx) const {
// Conservative default: never report an operand latency as 'high'.
1700 return false;
1701 }
1702
1703 /// Compute operand latency of a def of 'Reg'. Return true
1704 /// if the target considered it 'low'.
1705 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1706 const MachineInstr &DefMI,
1707 unsigned DefIdx) const;
1708
1709 /// Perform target-specific instruction verification.
1710 virtual bool verifyInstruction(const MachineInstr &MI,
1711 StringRef &ErrInfo) const {
// Default: no extra checks; every instruction is accepted as valid and
// ErrInfo is left untouched.
1712 return true;
1713 }
1714
1715 /// Return the current execution domain and bit mask of
1716 /// possible domains for instruction.
1717 ///
1718 /// Some micro-architectures have multiple execution domains, and multiple
1719 /// opcodes that perform the same operation in different domains. For
1720 /// example, the x86 architecture provides the por, orps, and orpd
1721 /// instructions that all do the same thing. There is a latency penalty if a
1722 /// register is written in one domain and read in another.
1723 ///
1724 /// This function returns a pair (domain, mask) containing the execution
1725 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1726 /// function can be used to change the opcode to one of the domains in the
1727 /// bit mask. Instructions whose execution domain can't be changed should
1728 /// return a 0 mask.
1729 ///
1730 /// The execution domain numbers don't have any special meaning except domain
1731 /// 0 is used for instructions that are not associated with any interesting
1732 /// execution domain.
1733 ///
1734 virtual std::pair<uint16_t, uint16_t>
1736 return std::make_pair(0, 0);
1737 }
1738
1739 /// Change the opcode of MI to execute in Domain.
1740 ///
1741 /// The bit (1 << Domain) must be set in the mask returned from
1742 /// getExecutionDomain(MI).
1743 virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1744
1745 /// Returns the preferred minimum clearance
1746 /// before an instruction with an unwanted partial register update.
1747 ///
1748 /// Some instructions only write part of a register, and implicitly need to
1749 /// read the other parts of the register. This may cause unwanted stalls
1750 /// preventing otherwise unrelated instructions from executing in parallel in
1751 /// an out-of-order CPU.
1752 ///
1753 /// For example, the x86 instruction cvtsi2ss writes its result to bits
1754 /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1755 /// the instruction needs to wait for the old value of the register to become
1756 /// available:
1757 ///
1758 /// addps %xmm1, %xmm0
1759 /// movaps %xmm0, (%rax)
1760 /// cvtsi2ss %rbx, %xmm0
1761 ///
1762 /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1763 /// instruction before it can issue, even though the high bits of %xmm0
1764 /// probably aren't needed.
1765 ///
1766 /// This hook returns the preferred clearance before MI, measured in
1767 /// instructions. Other defs of MI's operand OpNum are avoided in the last N
1768 /// instructions before MI. It should only return a positive value for
1769 /// unwanted dependencies. If the old bits of the defined register have
1770 /// useful values, or if MI is determined to otherwise read the dependency,
1771 /// the hook should return 0.
1772 ///
1773 /// The unwanted dependency may be handled by:
1774 ///
1775 /// 1. Allocating the same register for an MI def and use. That makes the
1776 /// unwanted dependency identical to a required dependency.
1777 ///
1778 /// 2. Allocating a register for the def that has no defs in the previous N
1779 /// instructions.
1780 ///
1781 /// 3. Calling breakPartialRegDependency() with the same arguments. This
1782 /// allows the target to insert a dependency breaking instruction.
1783 ///
1784 virtual unsigned
1786 const TargetRegisterInfo *TRI) const {
1787 // The default implementation returns 0 for no partial register dependency.
1788 return 0;
1789 }
1790
1791 /// Return the minimum clearance before an instruction that reads an
1792 /// unused register.
1793 ///
1794 /// For example, AVX instructions may copy part of a register operand into
1795 /// the unused high bits of the destination register.
1796 ///
1797 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
1798 ///
1799 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
1800 /// false dependence on any previous write to %xmm0.
1801 ///
1802 /// This hook works similarly to getPartialRegUpdateClearance, except that it
1803 /// does not take an operand index. Instead sets \p OpNum to the index of the
1804 /// unused register.
1805 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
1806 const TargetRegisterInfo *TRI) const {
1807 // The default implementation returns 0 for no undef register dependency.
1808 return 0;
1809 }
1810
1811 /// Insert a dependency-breaking instruction
1812 /// before MI to eliminate an unwanted dependency on OpNum.
1813 ///
1814 /// If it wasn't possible to avoid a def in the last N instructions before MI
1815 /// (see getPartialRegUpdateClearance), this hook will be called to break the
1816 /// unwanted dependency.
1817 ///
1818 /// On x86, an xorps instruction can be used as a dependency breaker:
1819 ///
1820 /// addps %xmm1, %xmm0
1821 /// movaps %xmm0, (%rax)
1822 /// xorps %xmm0, %xmm0
1823 /// cvtsi2ss %rbx, %xmm0
1824 ///
1825 /// An <imp-kill> operand should be added to MI if an instruction was
1826 /// inserted. This ties the instructions together in the post-ra scheduler.
1827 ///
1828 virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
1829 const TargetRegisterInfo *TRI) const {}
1830
1831 /// Create machine specific model for scheduling.
1832 virtual DFAPacketizer *
1834 return nullptr;
1835 }
1836
1837 /// Sometimes, it is possible for the target
1838 /// to tell, even without aliasing information, that two MIs access different
1839 /// memory addresses. This function returns true if two MIs access different
1840 /// memory addresses and false otherwise.
1841 ///
1842 /// Assumes any physical registers used to compute addresses have the same
1843 /// value for both instructions. (This is the most useful assumption for
1844 /// post-RA scheduling.)
1845 ///
1846 /// See also MachineInstr::mayAlias, which is implemented on top of this
1847 /// function.
1848 virtual bool
1850 const MachineInstr &MIb) const {
1851 assert(MIa.mayLoadOrStore() &&
1852 "MIa must load from or modify a memory location");
1853 assert(MIb.mayLoadOrStore() &&
1854 "MIb must load from or modify a memory location");
1855 return false;
1856 }
1857
1858 /// Return the value to use for the MachineCSE's LookAheadLimit,
1859 /// which is a heuristic used for CSE'ing phys reg defs.
1860 virtual unsigned getMachineCSELookAheadLimit() const {
1861 // The default lookahead is small to prevent unprofitable quadratic
1862 // behavior.
1863 return 5;
1864 }
1865
1866 /// Return the maximal number of alias checks on memory operands. For
1867 /// instructions with more than one memory operands, the alias check on a
1868 /// single MachineInstr pair has quadratic overhead and results in
1869 /// unacceptable performance in the worst case. The limit here is to clamp
1870 /// that maximal checks performed. Usually, that's the product of memory
1871 /// operand numbers from that pair of MachineInstr to be checked. For
1872 /// instance, with two MachineInstrs with 4 and 5 memory operands
1873 /// correspondingly, a total of 20 checks are required. With this limit set to
1874 /// 16, their alias check is skipped. We choose to limit the product instead
1875 /// of the individual instruction as targets may have special MachineInstrs
1876 /// with a considerably high number of memory operands, such as `ldm` in ARM.
1877 /// Setting this limit per MachineInstr would result in either too high
1878 /// overhead or too rigid restriction.
1879 virtual unsigned getMemOperandAACheckLimit() const { return 16; }
1880
1881 /// Return an array that contains the ids of the target indices (used for the
1882 /// TargetIndex machine operand) and their names.
1883 ///
1884 /// MIR Serialization is able to serialize only the target indices that are
1885 /// defined by this method.
1888 return std::nullopt;
1889 }
1890
1891 /// Decompose the machine operand's target flags into two values - the direct
1892 /// target flag value and any of bit flags that are applied.
1893 virtual std::pair<unsigned, unsigned>
1895 return std::make_pair(0u, 0u);
1896 }
1897
1898 /// Return an array that contains the direct target flag values and their
1899 /// names.
1900 ///
1901 /// MIR Serialization is able to serialize only the target flags that are
1902 /// defined by this method.
1905 return std::nullopt;
1906 }
1907
1908 /// Return an array that contains the bitmask target flag values and their
1909 /// names.
1910 ///
1911 /// MIR Serialization is able to serialize only the target flags that are
1912 /// defined by this method.
1915 return std::nullopt;
1916 }
1917
1918 /// Return an array that contains the MMO target flag values and their
1919 /// names.
1920 ///
1921 /// MIR Serialization is able to serialize only the MMO target flags that are
1922 /// defined by this method.
1925 return std::nullopt;
1926 }
1927
1928 /// Determines whether \p Inst is a tail call instruction. Override this
1929 /// method on targets that do not properly set MCID::Return and MCID::Call on
1930 /// tail call instructions.
1931 virtual bool isTailCall(const MachineInstr &Inst) const {
// Default heuristic: a tail call is flagged as both a return and a call.
1932 return Inst.isReturn() && Inst.isCall();
1933 }
1934
1935 /// True if the instruction is bound to the top of its basic block and no
1936 /// other instructions shall be inserted before it. This can be implemented
1937 /// to prevent the register allocator from inserting spills before such instructions.
1938 virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
// Default: no instruction is pinned to the top of its block.
1939 return false;
1940 }
1941
1942 /// During PHI elimination, this lets the target make necessary checks and
1943 /// insert the copy to the PHI destination register in a target specific
1944 /// manner.
1947 const DebugLoc &DL, Register Src, Register Dst) const {
1948 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
1949 .addReg(Src);
1950 }
1951
1952 /// During PHI elimination, this lets the target make necessary checks and
1953 /// insert the copy to the PHI destination register in a target specific
1954 /// manner.
1957 const DebugLoc &DL, Register Src,
1958 unsigned SrcSubReg,
1959 Register Dst) const {
1960 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
1961 .addReg(Src, 0, SrcSubReg);
1962 }
1963
1964 /// Returns a \p outliner::OutlinedFunction struct containing target-specific
1965 /// information for a set of outlining candidates.
1967 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1969 "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
1970 }
1971
1972 /// Optional target hook to create the LLVM IR attributes for the outlined
1973 /// function. If overridden, the overriding function must call the default
1974 /// implementation.
1976 Function &F, std::vector<outliner::Candidate> &Candidates) const;
1977
1978 /// Returns how or if \p MI should be outlined.
1979 virtual outliner::InstrType
1980 getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
1982 "Target didn't implement TargetInstrInfo::getOutliningType!");
1983 }
1984
1985 /// Optional target hook that returns true if \p MBB is safe to outline from,
1986 /// and returns any target-specific information in \p Flags.
1988 unsigned &Flags) const;
1989
1990 /// Optional target hook which partitions \p MBB into outlinable ranges for
1991 /// instruction mapping purposes. Each range is defined by two iterators:
1992 /// [start, end).
1993 ///
1994 /// Ranges are expected to be ordered top-down. That is, ranges closer to the
1995 /// top of the block should come before ranges closer to the end of the block.
1996 ///
1997 /// Ranges cannot overlap.
1998 ///
1999 /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end())
2000 ///
2001 /// All instructions not present in an outlinable range are considered
2002 /// illegal.
2003 virtual SmallVector<
2004 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
2005 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
2006 return {std::make_pair(MBB.begin(), MBB.end())};
2007 }
2008
2009 /// Insert a custom frame for outlined functions.
2011 const outliner::OutlinedFunction &OF) const {
2013 "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
2014 }
2015
2016 /// Insert a call to an outlined function into the program.
2017 /// Returns an iterator to the spot where we inserted the call. This must be
2018 /// implemented by the target.
2022 outliner::Candidate &C) const {
2024 "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
2025 }
2026
2027 /// Return true if the function can safely be outlined from.
2028 /// A function \p MF is considered safe for outlining if an outlined function
2029 /// produced from instructions in F will produce a program which produces the
2030 /// same output for any set of given inputs.
2032 bool OutlineFromLinkOnceODRs) const {
2033 llvm_unreachable("Target didn't implement "
2034 "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
2035 }
2036
2037 /// Return true if the function should be outlined from by default.
2039 return false;
2040 }
2041
2042 /// Produce the expression describing the \p MI loading a value into
2043 /// the physical register \p Reg. This hook should only be used with
2044 /// \p MIs belonging to VReg-less functions.
2045 virtual std::optional<ParamLoadedValue>
2047
2048 /// Given the generic extension instruction \p ExtMI, returns true if this
2049 /// extension is a likely candidate for being folded into an another
2050 /// instruction.
2052 MachineRegisterInfo &MRI) const {
2053 return false;
2054 }
2055
2056 /// Return MIR formatter to format/parse MIR operands. Target can override
2057 /// this virtual function and return target specific MIR formatter.
2058 virtual const MIRFormatter *getMIRFormatter() const {
// Lazily construct and cache the default formatter; the cache lives in the
// mutable `Formatter` member so this const method can populate it.
2059 if (!Formatter.get())
2060 Formatter = std::make_unique<MIRFormatter>();
2061 return Formatter.get();
2062 }
2063
2064 /// Returns the target-specific default value for tail duplication.
2065 /// This value will be used if the tail-dup-placement-threshold argument is
2066 /// not provided.
2067 virtual unsigned getTailDuplicateSize(CodeGenOpt::Level OptLevel) const {
2068 return OptLevel >= CodeGenOpt::Aggressive ? 4 : 2;
2069 }
2070
2071 /// Returns the callee operand from the given \p MI.
2072 virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
// Default assumes the callee is operand 0; targets whose call instructions
// place the callee elsewhere must override this.
2073 return MI.getOperand(0);
2074 }
2075
2076 /// Return the uniformity behavior of the given instruction.
2077 virtual InstructionUniformity
2080 }
2081
2082 /// Returns true if the given \p MI defines a TargetIndex operand that can be
2083 /// tracked by their offset, can have values, and can have debug info
2084 /// associated with it. If so, sets \p Index and \p Offset of the target index
2085 /// operand.
2087 int64_t &Offset) const {
2088 return false;
2089 }
2090
2091private:
2092 mutable std::unique_ptr<MIRFormatter> Formatter;
2093 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
2094 unsigned CatchRetOpcode;
2095 unsigned ReturnOpcode;
2096};
2097
2098/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
2101
2103 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
2104 RegInfo::getEmptyKey());
2105 }
2106
2108 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
2109 RegInfo::getTombstoneKey());
2110 }
2111
2112 /// Reuse getHashValue implementation from
2113 /// std::pair<unsigned, unsigned>.
2114 static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
2115 std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
2116 return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
2117 }
2118
2121 return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
2122 RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
2123 }
2124};
2125
2126} // end namespace llvm
2127
2128#endif // LLVM_CODEGEN_TARGETINSTRINFO_H
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
SmallVector< MachineOperand, 4 > Cond
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
uint64_t Size
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Check Debug Module
Contains all data structures shared between the outliner implemented in MachineOutliner....
unsigned const TargetRegisterInfo * TRI
unsigned Reg
#define P(N)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:708
A debug info location.
Definition: DebugLoc.h:33
Itinerary data supplied by a subtarget to be used by a target.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
MIRFormater - Interface to format MIR operand based on target.
Definition: MIRFormatter.h:28
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:862
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:872
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Represents one node in the SelectionDAG.
This class represents the scheduled code.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
virtual void disposed()=0
Called when the loop is being removed.
virtual void adjustTripCount(int TripCountAdjust)=0
Modify the loop such that the trip count is OriginalTC + TripCountAdjust.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const =0
Return true if the given instruction should not be pipelined and should be ignored.
virtual void setPreheader(MachineBasicBlock *NewPreheader)=0
Called when the loop's preheader has been modified to NewPreheader.
virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
Return true if the proposed schedule should be used.
virtual std::optional< bool > createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond)=0
Create a condition to determine if the trip count of the loop is greater than TC, where TC is always ...
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual SmallVector< std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > > getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook which partitions MBB into outlinable ranges for instruction mapping purposes.
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it's profitable to predicate instructions with accumulated instruction latency of "Num...
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI) const
Try to remove the load by folding it to a register operand at the use.
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
virtual std::unique_ptr< PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
virtual bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const
'Reg' is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const
Assumes the instruction is already predicated and returns true if the instruction can be predicated a...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don't consume any machine resources in their current form.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const
Given the generic extension instruction ExtMI, returns true if this extension is a likely candidate f...
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu.
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr * > &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
bool isTriviallyReMaterializable(const MachineInstr &MI) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const
Load the specified register of the given register class from the specified stack frame index.
virtual outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MI should be outlined.
virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
virtual void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const
Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &NewMIs, bool PreferFalse=false) const
Given a select instruction that was understood by analyzeSelect and returned Optimizable = true,...
virtual const MIRFormatter * getMIRFormatter() const
Return MIR formatter to format/parse MIR operands.
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
virtual bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const
Return true if target supports reassociation of instructions in machine combiner pass to reduce regis...
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const
Store the specified register of the given register class to the specified stack frame index.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const
Optional target hook to create the LLVM IR attributes for the outlined function.
virtual bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const
Returns true if MI's Def is NullValueReg, and the MI does not change the Zero value.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of 'Reg' and a use in the current loop.
bool isUnspillableTerminator(const MachineInstr *MI) const
Return true if the given instruction is a terminator that is unspillable, according to isUnspillableTer...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it's profitable to unpredicate one side of a 'diamond', i.e.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
virtual int getExtendResourceLenLimit() const
The limit on resource length extension we accept in MachineCombiner Pass.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
Allocate and return a hazard recognizer to use for by non-scheduling passes.
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true,...
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const
Emit instructions to copy a pair of physical registers.
virtual bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const
Sometimes, it is possible for the target to tell, even without aliasing information,...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
unsigned getReturnOpcode() const
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
virtual bool isIgnorableUse(const MachineOperand &MO) const
Given MO is a PhysReg use return if it can be ignored for the purpose of instruction rematerializatio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient.
virtual unsigned getMemOperandAACheckLimit() const
Return the maximal number of alias checks on memory operands.
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
virtual std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const
Target dependent implementation to get the values constituting the address MachineInstr that is acces...
virtual std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const
Target-dependent implementation for IsCopyInstr.
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const
Returns true if MI is an instruction that defines Reg to have a constant value and the value is recor...
static bool isGenericOpcode(unsigned Opc)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
virtual bool isBasicBlockPrologue(const MachineInstr &MI) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
virtual MCInst getNop() const
Return the noop instruction to use for a noop.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack.
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understood.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
unsigned getCatchReturnOpcode() const
virtual InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const
Return the uniformity behavior of the given instruction.
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index, int64_t &Offset) const
Returns true if the given MI defines a TargetIndex operand that can be tracked by their offset,...
virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const
Allow targets to tell MachineVerifier whether a specific register MachineOperand can be used as part ...
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
TargetInstrInfo(unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u)
virtual void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const
Insert noops into the instruction stream at the specified point.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
TargetInstrInfo(const TargetInstrInfo &)=delete
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const
Return an estimate for the code size reduction (in bytes) which will be caused by removing the given ...
virtual ~TargetInstrInfo()
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
virtual unsigned getTailDuplicateSize(CodeGenOpt::Level OptLevel) const
Returns the target-specific default value for tail duplication.
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const
Return the increase in code size needed to predicate a contiguous run of NumInsts instructions.
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is a instruction that moves/copies value from one register to ano...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input \P Inst is part of a chain of dependent ops that are suitable for reassociat...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const
Analyze the given select instruction, returning true if it cannot be understood.
std::pair< unsigned, unsigned > getReassociationOpcodes(MachineCombinerPattern Pattern, const MachineInstr &Root, const MachineInstr &Prev) const
Reassociation of some instructions requires inverse operations (e.g.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it's profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2, unsigned NumLoads, unsigned NumBytes) const
Returns true if the two given memory operations should be scheduled adjacent.
virtual unsigned getPredicationCost(const MachineInstr &MI) const
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual outliner::OutlinedFunction getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const
Returns an outliner::OutlinedFunction struct containing target-specific information for a set of outli...
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand's target flags into two values - the direct target flag value and any o...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it's safe to move a machine instruction that defines the specified register class.
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const
Return true if the given terminator MI is not expected to spill.
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a physical reg...
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static bool isGenericAtomicRMWOpcode(unsigned Opc)
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const
Returns true if the target has a preference on the operands order of the given machine instruction.
static const unsigned CommuteAnyOperandIndex
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate \P Root and \P Prev according to \P Pattern to reduce critical path length.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1,...
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual bool shouldHoist(const MachineInstr &MI, const MachineLoop *FromLoop) const
Return false if the instruction should not be hoisted by MachineLICM.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual void finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, SmallVectorImpl< MachineInstr * > &InsInstrs) const
Fix up the placeholder we may add in genAlternativeCodeSequence().
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base a...
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode * > &NewNodes) const
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE's LookAheadLimit, which is a heuristic used for CSE'ing ph...
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it's legal to split the given basic block at the specified instruction (i....
bool getMemOperandWithOffset(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
LLVM Value Representation.
Definition: Value.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Level
Code generation optimization level.
Definition: CodeGen.h:57
@ Aggressive
-O3
Definition: CodeGen.h:61
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:406
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence,...
Definition: Uniformity.h:21
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
static TargetInstrInfo::RegSubRegPair getEmptyKey()
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:51
const MachineOperand * Source
DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
const MachineOperand * Destination
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:244
Used to describe a register and immediate addition.
RegImmPair(Register Reg, int64_t Imm)
Represents a predicate at the MachineFunction level.
bool SingleUseCondition
SingleUseCondition is true if ConditionDef is dead except for the branch(es) at the end of the basic ...
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
RegSubRegPairAndIdx(Register Reg=Register(), unsigned SubReg=0, unsigned SubIdx=0)
A pair composed of a register and a sub-register index.
bool operator==(const RegSubRegPair &P) const
RegSubRegPair(Register Reg=Register(), unsigned SubReg=0)
bool operator!=(const RegSubRegPair &P) const
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.