LLVM 17.0.0git
TargetInstrInfo.h
Go to the documentation of this file.
1//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the target machine instruction set to the code generator.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
14#define LLVM_CODEGEN_TARGETINSTRINFO_H
15
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/Uniformity.h"
29#include "llvm/MC/MCInstrInfo.h"
32#include <cassert>
33#include <cstddef>
34#include <cstdint>
35#include <utility>
36#include <vector>
37
38namespace llvm {
39
40class DFAPacketizer;
41class InstrItineraryData;
42class LiveIntervals;
43class LiveVariables;
44class MachineLoop;
45class MachineMemOperand;
46class MachineRegisterInfo;
47class MCAsmInfo;
48class MCInst;
49struct MCSchedModel;
50class Module;
51class ScheduleDAG;
52class ScheduleDAGMI;
53class ScheduleHazardRecognizer;
54class SDNode;
55class SelectionDAG;
56class SMSchedule;
57class SwingSchedulerDAG;
58class RegScavenger;
59class TargetRegisterClass;
60class TargetRegisterInfo;
61class TargetSchedModel;
62class TargetSubtargetInfo;
63enum class MachineCombinerPattern;
64enum class MachineTraceStrategy;
65
66template <class T> class SmallVectorImpl;
67
68using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;
69
73
75 : Destination(&Dest), Source(&Src) {}
76};
77
78/// Used to describe a register and immediate addition.
79struct RegImmPair {
81 int64_t Imm;
82
83 RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
84};
85
86/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
87/// It holds the register values, the scale value and the displacement.
91 int64_t Scale;
92 int64_t Displacement;
93};
94
95//---------------------------------------------------------------------------
96///
97/// TargetInstrInfo - Interface to description of machine instruction set
98///
100public:
  /// Construct with the target-specific pseudo opcodes used for call-frame
  /// setup/destroy, catch returns, and function returns. A value of ~0u
  /// means the target has no such opcode.
  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
      : CallFrameSetupOpcode(CFSetupOpcode),
        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
        ReturnOpcode(ReturnOpcode) {}
109
  /// Returns true if \p Opc is a target-independent (generic) opcode,
  /// i.e. one at or below TargetOpcode::GENERIC_OP_END.
  static bool isGenericOpcode(unsigned Opc) {
    return Opc <= TargetOpcode::GENERIC_OP_END;
  }
113
114 static bool isGenericAtomicRMWOpcode(unsigned Opc) {
115 return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
116 Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
117 }
118
  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL if there is none. Virtual so
  /// targets can impose subtarget- or function-dependent constraints.
  virtual
  const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                                         const TargetRegisterInfo *TRI,
                                         const MachineFunction &MF) const;
125
126 /// Return true if the instruction is trivially rematerializable, meaning it
127 /// has no side effects and requires no operands that aren't always available.
128 /// This means the only allowed uses are constants and unallocatable physical
129 /// registers so that the instructions result is independent of the place
130 /// in the function.
132 return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
133 (MI.getDesc().isRematerializable() &&
135 isReallyTriviallyReMaterializableGeneric(MI)));
136 }
137
  /// Given \p MO is a PhysReg use return if it can be ignored for the purpose
  /// of instruction rematerialization or sinking.
  /// The default is conservative: no use may be ignored.
  virtual bool isIgnorableUse(const MachineOperand &MO) const {
    return false;
  }
143
144protected:
145 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
146 /// set, this hook lets the target specify whether the instruction is actually
147 /// trivially rematerializable, taking into consideration its operands. This
148 /// predicate must return false if the instruction has any side effects other
149 /// than producing a value, or if it requres any address registers that are
150 /// not always available.
151 /// Requirements must be check as stated in isTriviallyReMaterializable() .
153 return false;
154 }
155
156 /// This method commutes the operands of the given machine instruction MI.
157 /// The operands to be commuted are specified by their indices OpIdx1 and
158 /// OpIdx2.
159 ///
160 /// If a target has any instructions that are commutable but require
161 /// converting to different instructions or making non-trivial changes
162 /// to commute them, this method can be overloaded to do that.
163 /// The default implementation simply swaps the commutable operands.
164 ///
165 /// If NewMI is false, MI is modified in place and returned; otherwise, a
166 /// new machine instruction is created and returned.
167 ///
168 /// Do not call this method for a non-commutable instruction.
169 /// Even though the instruction is commutable, the method may still
170 /// fail to commute the operands, null pointer is returned in such cases.
172 unsigned OpIdx1,
173 unsigned OpIdx2) const;
174
  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
  /// operand indices to (ResultIdx1, ResultIdx2).
  /// One or both input values of the pair (ResultIdx1, ResultIdx2) may be
  /// predefined to some indices or be undefined (designated by the special
  /// value 'CommuteAnyOperandIndex').
  /// The predefined result indices cannot be re-defined.
  /// Returns true iff, after the result pair redefinition,
  /// the fixed result pair is equal to or equivalent to the source pair of
  /// indices (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
  /// the pairs (x,y) and (y,x) are equivalent.
  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
                                   unsigned CommutableOpIdx1,
                                   unsigned CommutableOpIdx2);
188
189private:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set and the target hook isReallyTriviallyReMaterializable returns false,
  /// this function does target-independent tests to determine if the
  /// instruction is really trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI) const;
195
196public:
  /// These methods return the opcode of the frame setup/destroy instructions
  /// if they exist (~0u otherwise, as set by the constructor). Some targets
  /// use pseudo instructions in order to abstract away the difference between
  /// operating with a frame pointer and operating without, through the use of
  /// these two instructions.
  ///
  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
204
205 /// Returns true if the argument is a frame pseudo instruction.
206 bool isFrameInstr(const MachineInstr &I) const {
207 return I.getOpcode() == getCallFrameSetupOpcode() ||
208 I.getOpcode() == getCallFrameDestroyOpcode();
209 }
210
  /// Returns true if the argument is the frame *setup* pseudo instruction
  /// specifically (not frame destroy).
  bool isFrameSetup(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode();
  }
215
216 /// Returns size of the frame associated with the given frame instruction.
217 /// For frame setup instruction this is frame that is set up space set up
218 /// after the instruction. For frame destroy instruction this is the frame
219 /// freed by the caller.
220 /// Note, in some cases a call frame (or a part of it) may be prepared prior
221 /// to the frame setup instruction. It occurs in the calls that involve
222 /// inalloca arguments. This function reports only the size of the frame part
223 /// that is set up between the frame setup and destroy pseudo instructions.
224 int64_t getFrameSize(const MachineInstr &I) const {
225 assert(isFrameInstr(I) && "Not a frame instruction");
226 assert(I.getOperand(0).getImm() >= 0);
227 return I.getOperand(0).getImm();
228 }
229
230 /// Returns the total frame size, which is made up of the space set up inside
231 /// the pair of frame start-stop instructions and the space that is set up
232 /// prior to the pair.
233 int64_t getFrameTotalSize(const MachineInstr &I) const {
234 if (isFrameSetup(I)) {
235 assert(I.getOperand(1).getImm() >= 0 &&
236 "Frame size must not be negative");
237 return getFrameSize(I) + I.getOperand(1).getImm();
238 }
239 return getFrameSize(I);
240 }
241
  /// Returns the opcode of the catch-return pseudo instruction (~0u if none).
  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
  /// Returns the opcode of the return instruction (~0u if none).
  unsigned getReturnOpcode() const { return ReturnOpcode; }
244
  /// Returns the actual stack pointer adjustment made by an instruction
  /// as part of a call sequence. By default, only call frame setup/destroy
  /// instructions adjust the stack, but targets may want to override this
  /// to enable more fine-grained adjustment, or adjust by a different value.
  virtual int getSPAdjust(const MachineInstr &MI) const;
250
  /// Return true if the instruction is a "coalescable" extension instruction.
  /// That is, it's like a copy where it's legal for the source to overlap the
  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
  /// expected the pre-extension value is available as a subreg of the result
  /// register. This also returns the sub-register index in SubIdx.
  /// Default: no instruction is treated as a coalescable extension.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                                     Register &DstReg, unsigned &SubIdx) const {
    return false;
  }
260
  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  /// Default: treats no instruction as a stack-slot load.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex) const {
    return 0;
  }
270
  /// Optional extension of isLoadFromStackSlot that returns the number of
  /// bytes loaded from the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the load does.
  /// Default: reports 0 bytes and defers to the two-argument overload.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex,
                                       unsigned &MemBytes) const {
    MemBytes = 0;
    return isLoadFromStackSlot(MI, FrameIndex);
  }
281
282 /// Check for post-frame ptr elimination stack locations as well.
283 /// This uses a heuristic so it isn't reliable for correctness.
285 int &FrameIndex) const {
286 return 0;
287 }
288
289 /// If the specified machine instruction has a load from a stack slot,
290 /// return true along with the FrameIndices of the loaded stack slot and the
291 /// machine mem operands containing the reference.
292 /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
293 /// any instructions that loads from the stack. This is just a hint, as some
294 /// cases may be missed.
295 virtual bool hasLoadFromStackSlot(
296 const MachineInstr &MI,
298
  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stored stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  /// Default: treats no instruction as a stack-slot store.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex) const {
    return 0;
  }
308
  /// Optional extension of isStoreToStackSlot that returns the number of
  /// bytes stored to the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the store does.
  /// Default: reports 0 bytes and defers to the two-argument overload.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex,
                                      unsigned &MemBytes) const {
    MemBytes = 0;
    return isStoreToStackSlot(MI, FrameIndex);
  }
319
  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  /// Default: recognizes nothing.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
                                            int &FrameIndex) const {
    return 0;
  }
326
327 /// If the specified machine instruction has a store to a stack slot,
328 /// return true along with the FrameIndices of the loaded stack slot and the
329 /// machine mem operands containing the reference.
330 /// If not, return false. Unlike isStoreToStackSlot,
331 /// this returns true for any instructions that stores to the
332 /// stack. This is just a hint, as some cases may be missed.
333 virtual bool hasStoreToStackSlot(
334 const MachineInstr &MI,
336
  /// Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  /// Default: no instruction is treated as a stack-slot copy.
  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }
344
  /// Compute the size in bytes and offset within a stack slot of a spilled
  /// register or subregister.
  ///
  /// \param [out] Size in bytes of the spilled value.
  /// \param [out] Offset in bytes within the stack slot.
  /// \returns true if both Size and Offset are successfully computed.
  ///
  /// Not all subregisters have computable spill slots. For example,
  /// subregisters may not be byte-sized, and a pair of discontiguous
  /// subregisters has no single offset.
  ///
  /// Targets with nontrivial bigendian implementations may need to override
  /// this, particularly to support spilled vector registers.
  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
                                 unsigned &Size, unsigned &Offset,
                                 const MachineFunction &MF) const;
361
362 /// Return true if the given instruction is terminator that is unspillable,
363 /// according to isUnspillableTerminatorImpl.
365 return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
366 }
367
  /// Returns the size in bytes of the specified MachineInstr, or ~0U
  /// when this function is not implemented by a target.
  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
    return ~0U;
  }
373
  /// Return true if the instruction is as cheap as a move instruction.
  ///
  /// Targets for different archs need to override this, and different
  /// micro-architectures can also be finely tuned inside.
  /// Default: defers to the instruction's own AsCheapAsAMove flag.
  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
    return MI.isAsCheapAsAMove();
  }
381
  /// Return true if the instruction should be sunk by MachineSink.
  ///
  /// MachineSink determines on its own whether the instruction is safe to sink;
  /// this gives the target a hook to override the default behavior with regards
  /// to which instructions should be sunk. Default: allow sinking.
  virtual bool shouldSink(const MachineInstr &MI) const { return true; }
388
  /// Return false if the instruction should not be hoisted by MachineLICM.
  ///
  /// MachineLICM determines on its own whether the instruction is safe to
  /// hoist; this gives the target a hook to extend this assessment and prevent
  /// an instruction being hoisted from a given loop for target specific
  /// reasons. Default: allow hoisting.
  virtual bool shouldHoist(const MachineInstr &MI,
                           const MachineLoop *FromLoop) const {
    return true;
  }
399
400 /// Re-issue the specified 'original' instruction at the
401 /// specific location targeting a new destination register.
402 /// The register in Orig->getOperand(0).getReg() will be substituted by
403 /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
404 /// SubIdx.
405 virtual void reMaterialize(MachineBasicBlock &MBB,
407 unsigned SubIdx, const MachineInstr &Orig,
408 const TargetRegisterInfo &TRI) const;
409
410 /// Clones instruction or the whole instruction bundle \p Orig and
411 /// insert into \p MBB before \p InsertBefore. The target may update operands
412 /// that are required to be unique.
413 ///
414 /// \p Orig must not return true for MachineInstr::isNotDuplicable().
416 MachineBasicBlock::iterator InsertBefore,
417 const MachineInstr &Orig) const;
418
419 /// This method must be implemented by targets that
420 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
421 /// may be able to convert a two-address instruction into one or more true
422 /// three-address instructions on demand. This allows the X86 target (for
423 /// example) to convert ADD and SHL instructions into LEA instructions if they
424 /// would require register copies due to two-addressness.
425 ///
426 /// This method returns a null pointer if the transformation cannot be
427 /// performed, otherwise it returns the last new instruction.
428 ///
429 /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
430 /// replacing \p MI with new instructions, even though this function does not
431 /// remove MI.
433 LiveVariables *LV,
434 LiveIntervals *LIS) const {
435 return nullptr;
436 }
437
  // This constant can be used as an input value of an operand index passed to
  // the method findCommutedOpIndices() to tell the method that the
  // corresponding operand index is not pre-defined and that the method
  // can pick any commutable operand.
  static const unsigned CommuteAnyOperandIndex = ~0U;
443
444 /// This method commutes the operands of the given machine instruction MI.
445 ///
446 /// The operands to be commuted are specified by their indices OpIdx1 and
447 /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
448 /// 'CommuteAnyOperandIndex', which means that the method is free to choose
449 /// any arbitrarily chosen commutable operand. If both arguments are set to
450 /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
451 /// operands; then commutes them if such operands could be found.
452 ///
453 /// If NewMI is false, MI is modified in place and returned; otherwise, a
454 /// new machine instruction is created and returned.
455 ///
456 /// Do not call this method for a non-commutable instruction or
457 /// for non-commuable operands.
458 /// Even though the instruction is commutable, the method may still
459 /// fail to commute the operands, null pointer is returned in such cases.
461 commuteInstruction(MachineInstr &MI, bool NewMI = false,
462 unsigned OpIdx1 = CommuteAnyOperandIndex,
463 unsigned OpIdx2 = CommuteAnyOperandIndex) const;
464
  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
  /// If any of the INPUT values is set to the special value
  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
  /// operand, then returns its index in the corresponding argument.
  /// If both of the INPUT values are set to 'CommuteAnyOperandIndex' then the
  /// method looks for 2 commutable operands.
  /// If the INPUT values refer to some operands of MI, then the method simply
  /// returns true if the corresponding operands are commutable and returns
  /// false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with the operand#1.
  virtual bool findCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;
485
  /// Returns true if the target has a preference on the operand order of
  /// the given machine instruction, and specifies via \p Commute whether a
  /// commute is required to get the desired order.
  /// Default: no preference.
  virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
    return false;
  }
492
493 /// A pair composed of a register and a sub-register index.
494 /// Used to give some type checking when modeling Reg:SubReg.
497 unsigned SubReg;
498
500 : Reg(Reg), SubReg(SubReg) {}
501
    /// Structural equality: both the register and sub-register index match.
    bool operator==(const RegSubRegPair& P) const {
      return Reg == P.Reg && SubReg == P.SubReg;
    }
    /// Inequality, defined as the negation of operator==.
    bool operator!=(const RegSubRegPair& P) const {
      return !(*this == P);
    }
508 };
509
510 /// A pair composed of a pair of a register and a sub-register index,
511 /// and another sub-register index.
512 /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
514 unsigned SubIdx;
515
517 unsigned SubIdx = 0)
519 };
520
  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
  /// flag are not added to this list.
  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
  /// two elements:
  /// - %1:sub1, sub0
  /// - %2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isRegSequenceLike(). In other words, one has to override
  /// getRegSequenceLikeInputs for target specific instructions.
  bool
  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
542
  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
  /// - %1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isExtractSubregLike(). In other words, one has to override
  /// getExtractSubregLikeInputs for target specific instructions.
  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                              RegSubRegPairAndIdx &InputReg) const;
560
  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
  /// - BaseReg: %0:sub0
  /// - InsertedReg: %1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isInsertSubregLike(). In other words, one has to override
  /// getInsertSubregLikeInputs for target specific instructions.
  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                             RegSubRegPair &BaseReg,
                             RegSubRegPairAndIdx &InsertedReg) const;
581
  /// Return true if two machine instructions would produce identical values.
  /// By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr &MI0,
                                const MachineInstr &MI1,
                                const MachineRegisterInfo *MRI = nullptr) const;
590
  /// \returns true if a branch instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  /// Unimplemented by default; targets supporting branch relaxation must
  /// override this.
  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                     int64_t BrOffset) const {
    llvm_unreachable("target did not implement");
  }
597
598 /// \returns The block that branch instruction \p MI jumps to.
600 llvm_unreachable("target did not implement");
601 }
602
603 /// Insert an unconditional indirect branch at the end of \p MBB to \p
604 /// NewDestBB. Optionally, insert the clobbered register restoring in \p
605 /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
606 /// the offset of the position to insert the new branch.
608 MachineBasicBlock &NewDestBB,
609 MachineBasicBlock &RestoreBB,
610 const DebugLoc &DL, int64_t BrOffset = 0,
611 RegScavenger *RS = nullptr) const {
612 llvm_unreachable("target did not implement");
613 }
614
615 /// Analyze the branching code at the end of MBB, returning
616 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
617 /// implemented for a target). Upon success, this returns false and returns
618 /// with the following information in various cases:
619 ///
620 /// 1. If this block ends with no branches (it just falls through to its succ)
621 /// just return false, leaving TBB/FBB null.
622 /// 2. If this block ends with only an unconditional branch, it sets TBB to be
623 /// the destination block.
624 /// 3. If this block ends with a conditional branch and it falls through to a
625 /// successor block, it sets TBB to be the branch destination block and a
626 /// list of operands that evaluate the condition. These operands can be
627 /// passed to other TargetInstrInfo methods to create new branches.
628 /// 4. If this block ends with a conditional branch followed by an
629 /// unconditional branch, it returns the 'true' destination in TBB, the
630 /// 'false' destination in FBB, and a list of operands that evaluate the
631 /// condition. These operands can be passed to other TargetInstrInfo
632 /// methods to create new branches.
633 ///
634 /// Note that removeBranch and insertBranch must be implemented to support
635 /// cases where this method returns success.
636 ///
637 /// If AllowModify is true, then this routine is allowed to modify the basic
638 /// block (e.g. delete instructions after the unconditional branch).
639 ///
640 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
641 /// before calling this function.
643 MachineBasicBlock *&FBB,
645 bool AllowModify = false) const {
646 return true;
647 }
648
649 /// Represents a predicate at the MachineFunction level. The control flow a
650 /// MachineBranchPredicate represents is:
651 ///
652 /// Reg = LHS `Predicate` RHS == ConditionDef
653 /// if Reg then goto TrueDest else goto FalseDest
654 ///
657 PRED_EQ, // True if two values are equal
658 PRED_NE, // True if two values are not equal
659 PRED_INVALID // Sentinel value
660 };
661
668
669 /// SingleUseCondition is true if ConditionDef is dead except for the
670 /// branch(es) at the end of the basic block.
671 ///
672 bool SingleUseCondition = false;
673
674 explicit MachineBranchPredicate() = default;
675 };
676
677 /// Analyze the branching code at the end of MBB and parse it into the
678 /// MachineBranchPredicate structure if possible. Returns false on success
679 /// and true on failure.
680 ///
681 /// If AllowModify is true, then this routine is allowed to modify the basic
682 /// block (e.g. delete instructions after the unconditional branch).
683 ///
686 bool AllowModify = false) const {
687 return true;
688 }
689
690 /// Remove the branching code at the end of the specific MBB.
691 /// This is only invoked in cases where analyzeBranch returns success. It
692 /// returns the number of instructions that were removed.
693 /// If \p BytesRemoved is non-null, report the change in code size from the
694 /// removed instructions.
696 int *BytesRemoved = nullptr) const {
697 llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
698 }
699
700 /// Insert branch code into the end of the specified MachineBasicBlock. The
701 /// operands to this method are the same as those returned by analyzeBranch.
702 /// This is only invoked in cases where analyzeBranch returns success. It
703 /// returns the number of instructions inserted. If \p BytesAdded is non-null,
704 /// report the change in code size from the added instructions.
705 ///
706 /// It is also invoked by tail merging to add unconditional branches in
707 /// cases where analyzeBranch doesn't apply because there was no original
708 /// branch to analyze. At least this much must be implemented, else tail
709 /// merging needs to be disabled.
710 ///
711 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
712 /// before calling this function.
716 const DebugLoc &DL,
717 int *BytesAdded = nullptr) const {
718 llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
719 }
720
722 MachineBasicBlock *DestBB,
723 const DebugLoc &DL,
724 int *BytesAdded = nullptr) const {
725 return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
726 BytesAdded);
727 }
728
729 /// Object returned by analyzeLoopForPipelining. Allows software pipelining
730 /// implementations to query attributes of the loop being pipelined and to
731 /// apply target-specific updates to the loop once pipelining is complete.
733 public:
    /// Return true if the given instruction should not be pipelined and should
    /// be ignored. An example could be a loop comparison, or induction variable
    /// update with no users being pipelined.
    virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;
739
740 /// Return true if the proposed schedule should used. Otherwise return
741 /// false to not pipeline the loop. This function should be used to ensure
742 /// that pipelined loops meet target-specific quality heuristics.
744 return true;
745 }
746
747 /// Create a condition to determine if the trip count of the loop is greater
748 /// than TC, where TC is always one more than for the previous prologue or
749 /// 0 if this is being called for the outermost prologue.
750 ///
751 /// If the trip count is statically known to be greater than TC, return
752 /// true. If the trip count is statically known to be not greater than TC,
753 /// return false. Otherwise return nullopt and fill out Cond with the test
754 /// condition.
755 ///
756 /// Note: This hook is guaranteed to be called from the innermost to the
757 /// outermost prologue of the loop being software pipelined.
758 virtual std::optional<bool>
761
    /// Modify the loop such that the trip count is
    /// OriginalTC + TripCountAdjust.
    virtual void adjustTripCount(int TripCountAdjust) = 0;
765
    /// Called when the loop's preheader has been modified to NewPreheader.
    virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;
768
    /// Called when the loop is being removed. Any instructions in the preheader
    /// should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed.
    virtual void disposed() = 0;
775 };
776
777 /// Analyze loop L, which must be a single-basic-block loop, and if the
778 /// conditions can be understood enough produce a PipelinerLoopInfo object.
779 virtual std::unique_ptr<PipelinerLoopInfo>
781 return nullptr;
782 }
783
  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  /// Default: loops are not analyzable.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }
791
792 /// Generate code to reduce the loop iteration by one and check if the loop
793 /// is finished. Return the value/register of the new loop count. We need
794 /// this function when peeling off one or more iterations of a loop. This
795 /// function assumes the nth iteration is peeled first.
797 MachineBasicBlock &PreHeader,
798 MachineInstr *IndVar, MachineInstr &Cmp,
801 unsigned Iter, unsigned MaxIter) const {
802 llvm_unreachable("Target didn't implement ReduceLoopCount");
803 }
804
805 /// Delete the instruction OldInst and everything after it, replacing it with
806 /// an unconditional branch to NewDest. This is used by the tail merging pass.
808 MachineBasicBlock *NewDest) const;
809
810 /// Return true if it's legal to split the given basic
811 /// block at the specified instruction (i.e. instruction would be the start
812 /// of a new basic block).
815 return true;
816 }
817
818 /// Return true if it's profitable to predicate
819 /// instructions with accumulated instruction latency of "NumCycles"
820 /// of the specified basic block, where the probability of the instructions
821 /// being executed is given by Probability, and Confidence is a measure
822 /// of our confidence that it will be properly predicted.
823 virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
824 unsigned ExtraPredCycles,
825 BranchProbability Probability) const {
// Default: never profitable; a target must supply its own cost model
// before the if-converter will predicate anything.
826 return false;
827 }
828
829 /// Second variant of isProfitableToIfCvt. This one
830 /// checks for the case where two basic blocks from true and false path
831 /// of a if-then-else (diamond) are predicated on mutually exclusive
832 /// predicates, where the probability of the true path being taken is given
833 /// by Probability, and Confidence is a measure of our confidence that it
834 /// will be properly predicted.
835 virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
836 unsigned ExtraTCycles,
837 MachineBasicBlock &FMBB, unsigned NumFCycles,
838 unsigned ExtraFCycles,
839 BranchProbability Probability) const {
// Default: diamond (if-then-else) if-conversion is never considered
// profitable without a target-specific cost model.
840 return false;
841 }
842
843 /// Return true if it's profitable for if-converter to duplicate instructions
844 /// of specified accumulated instruction latencies in the specified MBB to
845 /// enable if-conversion.
846 /// The probability of the instructions being executed is given by
847 /// Probability, and Confidence is a measure of our confidence that it
848 /// will be properly predicted.
850 unsigned NumCycles,
851 BranchProbability Probability) const {
852 return false;
853 }
854
855 /// Return the increase in code size needed to predicate a contiguous run of
856 /// NumInsts instructions.
858 unsigned NumInsts) const {
859 return 0;
860 }
861
862 /// Return an estimate for the code size reduction (in bytes) which will be
863 /// caused by removing the given branch instruction during if-conversion.
864 virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
// Default estimate: removing the branch saves exactly its encoded size.
865 return getInstSizeInBytes(MI);
866 }
867
868 /// Return true if it's profitable to unpredicate
869 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
870 /// exclusive predicates.
871 /// e.g.
872 /// subeq r0, r1, #1
873 /// addne r0, r1, #1
874 /// =>
875 /// sub r0, r1, #1
876 /// addne r0, r1, #1
877 ///
878 /// This may be profitable if conditional instructions are always executed.
880 MachineBasicBlock &FMBB) const {
881 return false;
882 }
883
884 /// Return true if it is possible to insert a select
885 /// instruction that chooses between TrueReg and FalseReg based on the
886 /// condition code in Cond.
887 ///
888 /// When successful, also return the latency in cycles from TrueReg,
889 /// FalseReg, and Cond to the destination register. In most cases, a select
890 /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
891 ///
892 /// Some x86 implementations have 2-cycle cmov instructions.
893 ///
894 /// @param MBB Block where select instruction would be inserted.
895 /// @param Cond Condition returned by analyzeBranch.
896 /// @param DstReg Virtual dest register that the result should write to.
897 /// @param TrueReg Virtual register to select when Cond is true.
898 /// @param FalseReg Virtual register to select when Cond is false.
899 /// @param CondCycles Latency from Cond+Branch to select output.
900 /// @param TrueCycles Latency from TrueReg to select output.
901 /// @param FalseCycles Latency from FalseReg to select output.
904 Register TrueReg, Register FalseReg,
905 int &CondCycles, int &TrueCycles,
906 int &FalseCycles) const {
907 return false;
908 }
909
910 /// Insert a select instruction into MBB before I that will copy TrueReg to
911 /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
912 ///
913 /// This function can only be called after canInsertSelect() returned true.
914 /// The condition in Cond comes from analyzeBranch, and it can be assumed
915 /// that the same flags or registers required by Cond are available at the
916 /// insertion point.
917 ///
918 /// @param MBB Block where select instruction should be inserted.
919 /// @param I Insertion point.
920 /// @param DL Source location for debugging.
921 /// @param DstReg Virtual register to be defined by select instruction.
922 /// @param Cond Condition as computed by analyzeBranch.
923 /// @param TrueReg Virtual register to copy when Cond is true.
924 /// @param FalseReg Virtual register to copy when Cond is false.
928 Register TrueReg, Register FalseReg) const {
929 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
930 }
931
932 /// Analyze the given select instruction, returning true if
933 /// it cannot be understood. It is assumed that MI->isSelect() is true.
934 ///
935 /// When successful, return the controlling condition and the operands that
936 /// determine the true and false result values.
937 ///
938 /// Result = SELECT Cond, TrueOp, FalseOp
939 ///
940 /// Some targets can optimize select instructions, for example by predicating
941 /// the instruction defining one of the operands. Such targets should set
942 /// Optimizable.
943 ///
944 /// @param MI Select instruction to analyze.
945 /// @param Cond Condition controlling the select.
946 /// @param TrueOp Operand number of the value selected when Cond is true.
947 /// @param FalseOp Operand number of the value selected when Cond is false.
948 /// @param Optimizable Returned as true if MI is optimizable.
949 /// @returns False on success.
950 virtual bool analyzeSelect(const MachineInstr &MI,
952 unsigned &TrueOp, unsigned &FalseOp,
953 bool &Optimizable) const {
954 assert(MI.getDesc().isSelect() && "MI must be a select instruction");
955 return true;
956 }
957
958 /// Given a select instruction that was understood by
959 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
960 /// merging it with one of its operands. Returns NULL on failure.
961 ///
962 /// When successful, returns the new select instruction. The client is
963 /// responsible for deleting MI.
964 ///
965 /// If both sides of the select can be optimized, PreferFalse is used to pick
966 /// a side.
967 ///
968 /// @param MI Optimizable select instruction.
969 /// @param NewMIs Set that record all MIs in the basic block up to \p
970 /// MI. Has to be updated with any newly created MI or deleted ones.
971 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
972 /// @returns Optimized instruction or NULL.
975 bool PreferFalse = false) const {
976 // This function must be implemented if Optimizable is ever set.
977 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
978 }
979
980 /// Emit instructions to copy a pair of physical registers.
981 ///
982 /// This function should support copies within any legal register class as
983 /// well as any cross-class copies created during instruction selection.
984 ///
985 /// The source and destination registers may overlap, which may require a
986 /// careful implementation when multiple copy instructions are required for
987 /// large registers. See for example the ARM target.
990 MCRegister DestReg, MCRegister SrcReg,
991 bool KillSrc) const {
992 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
993 }
994
995 /// Allow targets to tell MachineVerifier whether a specific register
996 /// MachineOperand can be used as part of PC-relative addressing.
997 /// PC-relative addressing modes in many CISC architectures contain
998 /// (non-PC) registers as offsets or scaling values, which inherently
999 /// tags the corresponding MachineOperand with OPERAND_PCREL.
1000 ///
1001 /// @param MO The MachineOperand in question. MO.isReg() should always
1002 /// be true.
1003 /// @return Whether this operand is allowed to be used PC-relatively.
1004 virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
// Default: reject register operands in PC-relative addressing; CISC-style
// targets that allow register offsets/scales override this.
1005 return false;
1006 }
1007
1008 /// Return an index for MachineJumpTableInfo if \p insn is an indirect jump
1009 /// using a jump table, otherwise -1.
1010 virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }
1011
1012protected:
1013 /// Target-dependent implementation for IsCopyInstr.
1014 /// If the specific machine instruction is an instruction that moves/copies
1015 /// value from one register to another register return destination and source
1016 /// registers as machine operands.
1017 virtual std::optional<DestSourcePair>
1019 return std::nullopt;
1020 }
1021
1022 /// Return true if the given terminator MI is not expected to spill. This
1023 /// sets the live interval as not spillable and adjusts phi node lowering to
1024 /// not introduce copies after the terminator. Use with care, these are
1025 /// currently used for hardware loop intrinsics in very controlled situations,
1026 /// created prior to register allocation in loops that only have single phi
1027 /// users for the terminators value. They may run out of registers if not used
1028 /// carefully.
1029 virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
// Default: no terminator is marked unspillable; only targets with
// hardware-loop terminators need to override this.
1030 return false;
1031 }
1032
1033public:
1034 /// If the specific machine instruction is an instruction that moves/copies
1035 /// value from one register to another register return destination and source
1036 /// registers as machine operands.
1037 /// For COPY-instruction the method naturally returns destination and source
1038 /// registers as machine operands, for all other instructions the method calls
1039 /// target-dependent implementation.
1040 std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
1041 if (MI.isCopy()) {
// Generic COPY: operand 0 is the destination, operand 1 the source.
1042 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
1043 }
// Not a generic COPY; defer to the target-dependent hook.
1044 return isCopyInstrImpl(MI);
1045 }
1046
1047 /// If the specific machine instruction is an instruction that adds an
1048 /// immediate value and a physical register, and stores the result in
1049 /// the given physical register \c Reg, return a pair of the source
1050 /// register and the offset which has been added.
1051 virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
1052 Register Reg) const {
// Default: no reg+imm addition recognized; debug-value tracking callers
// treat nullopt as "cannot describe this instruction".
1053 return std::nullopt;
1054 }
1055
1056 /// Returns true if MI is an instruction that defines Reg to have a constant
1057 /// value and the value is recorded in ImmVal. The ImmVal is a result that
1058 /// should be interpreted as modulo size of Reg.
1060 const Register Reg,
1061 int64_t &ImmVal) const {
1062 return false;
1063 }
1064
1065 /// Store the specified register of the given register class to the specified
1066 /// stack frame index. The store instruction is to be added to the given
1067 /// machine basic block before the specified machine instruction. If isKill
1068 /// is true, the register operand is the last use and must be marked kill. If
1069 /// \p SrcReg is being directly spilled as part of assigning a virtual
1070 /// register, \p VReg is the register being assigned. This additional register
1071 /// argument is needed for certain targets when invoked from RegAllocFast to
1072 /// map the spilled physical register to its virtual register. A null register
1073 /// can be passed elsewhere.
1076 Register SrcReg, bool isKill, int FrameIndex,
1077 const TargetRegisterClass *RC,
1078 const TargetRegisterInfo *TRI,
1079 Register VReg) const {
1080 llvm_unreachable("Target didn't implement "
1081 "TargetInstrInfo::storeRegToStackSlot!");
1082 }
1083
1084 /// Load the specified register of the given register class from the specified
1085 /// stack frame index. The load instruction is to be added to the given
1086 /// machine basic block before the specified machine instruction. If \p
1087 /// DestReg is being directly reloaded as part of assigning a virtual
1088 /// register, \p VReg is the register being assigned. This additional register
1089 /// argument is needed for certain targets when invoked from RegAllocFast to
1090 /// map the loaded physical register to its virtual register. A null register
1091 /// can be passed elsewhere.
1094 Register DestReg, int FrameIndex,
1095 const TargetRegisterClass *RC,
1096 const TargetRegisterInfo *TRI,
1097 Register VReg) const {
1098 llvm_unreachable("Target didn't implement "
1099 "TargetInstrInfo::loadRegFromStackSlot!");
1100 }
1101
1102 /// This function is called for all pseudo instructions
1103 /// that remain after register allocation. Many pseudo instructions are
1104 /// created to help register allocation. This is the place to convert them
1105 /// into real instructions. The target can edit MI in place, or it can insert
1106 /// new instructions and erase MI. The function should return true if
1107 /// anything was changed.
1108 virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
1109
1110 /// Check whether the target can fold a load that feeds a subreg operand
1111 /// (or a subreg operand that feeds a store).
1112 /// For example, X86 may want to return true if it can fold
1113 /// movl (%esp), %eax
1114 /// subb, %al, ...
1115 /// Into:
1116 /// subb (%esp), ...
1117 ///
1118 /// Ideally, we'd like the target implementation of foldMemoryOperand() to
1119 /// reject subregs - but since this behavior used to be enforced in the
1120 /// target-independent code, moving this responsibility to the targets
1121 /// has the potential of causing nasty silent breakage in out-of-tree targets.
1122 virtual bool isSubregFoldable() const { return false; }
1123
1124 /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
1125 /// operands which can't be folded into stack references. Operands outside
1126 /// of the range are most likely foldable but it is not guaranteed.
1127 /// These instructions are unique in that stack references for some operands
1128 /// have the same execution cost (e.g. none) as the unfolded register forms.
1129 /// The ranged return is guaranteed to include all operands which can't be
1130 /// folded at zero cost.
1131 virtual std::pair<unsigned, unsigned>
1133
1134 /// Attempt to fold a load or store of the specified stack
1135 /// slot into the specified machine instruction for the specified operand(s).
1136 /// If this is possible, a new instruction is returned with the specified
1137 /// operand folded, otherwise NULL is returned.
1138 /// The new instruction is inserted before MI, and the client is responsible
1139 /// for removing the old instruction.
1140 /// If VRM is passed, the assigned physregs can be inspected by target to
1141 /// decide on using an opcode (note that those assignments can still change).
1143 int FI,
1144 LiveIntervals *LIS = nullptr,
1145 VirtRegMap *VRM = nullptr) const;
1146
1147 /// Same as the previous version except it allows folding of any load and
1148 /// store from / to any address, not just from a specific stack slot.
1150 MachineInstr &LoadMI,
1151 LiveIntervals *LIS = nullptr) const;
1152
1153 /// Return true when there is potentially a faster code sequence
1154 /// for an instruction chain ending in \p Root. All potential patterns are
1155 /// returned in the \p Pattern vector. Pattern should be sorted in priority
1156 /// order since the pattern evaluator stops checking as soon as it finds a
1157 /// faster sequence.
1158 /// \param Root - Instruction that could be combined with one of its operands
1159 /// \param Patterns - Vector of possible combination patterns
1160 virtual bool
1163 bool DoRegPressureReduce) const;
1164
1165 /// Return true if target supports reassociation of instructions in machine
1166 /// combiner pass to reduce register pressure for a given BB.
1167 virtual bool
1169 const RegisterClassInfo *RegClassInfo) const {
1170 return false;
1171 }
1172
1173 /// Fix up the placeholder we may add in genAlternativeCodeSequence().
1174 virtual void
1176 SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
1177
1178 /// Return true when a code sequence can improve throughput. It
1179 /// should be called only for instructions in loops.
1180 /// \param Pattern - combiner pattern
1182
1183 /// Return true if the input \P Inst is part of a chain of dependent ops
1184 /// that are suitable for reassociation, otherwise return false.
1185 /// If the instruction's operands must be commuted to have a previous
1186 /// instruction of the same type define the first source operand, \P Commuted
1187 /// will be set to true.
1188 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
1189
1190 /// Return true when \P Inst is both associative and commutative. If \P Invert
1191 /// is true, then the inverse of \P Inst operation must be tested.
1193 bool Invert = false) const {
1194 return false;
1195 }
1196
1197 /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add
1198 /// for sub and vice versa).
1199 virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
// Default: no inverse opcode known, which disables inverse-based
// reassociation in the machine combiner for this target.
1200 return std::nullopt;
1201 }
1202
1203 /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
1204 bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;
1205
1206 /// Return true when \P Inst has reassociable operands in the same \P MBB.
1207 virtual bool hasReassociableOperands(const MachineInstr &Inst,
1208 const MachineBasicBlock *MBB) const;
1209
1210 /// Return true when \P Inst has reassociable sibling.
1211 virtual bool hasReassociableSibling(const MachineInstr &Inst,
1212 bool &Commuted) const;
1213
1214 /// When getMachineCombinerPatterns() finds patterns, this function generates
1215 /// the instructions that could replace the original code sequence. The client
1216 /// has to decide whether the actual replacement is beneficial or not.
1217 /// \param Root - Instruction that could be combined with one of its operands
1218 /// \param Pattern - Combination pattern for Root
1219 /// \param InsInstrs - Vector of new instructions that implement P
1220 /// \param DelInstrs - Old instructions, including Root, that could be
1221 /// replaced by InsInstr
1222 /// \param InstIdxForVirtReg - map of virtual register to instruction in
1223 /// InsInstr that defines it
1224 virtual void genAlternativeCodeSequence(
1228 DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
1229
1230 /// When calculate the latency of the root instruction, accumulate the
1231 /// latency of the sequence to the root latency.
1232 /// \param Root - Instruction that could be combined with one of its operands
1234 return true;
1235 }
1236
1237 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
1238 /// reduce critical path length.
1239 void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
1243 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
1244
1245 /// Reassociation of some instructions requires inverse operations (e.g.
1246 /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
1247 /// (new root opcode, new prev opcode) that must be used to reassociate \P
1248 /// Root and \P Prev according to \P Pattern.
1249 std::pair<unsigned, unsigned>
1251 const MachineInstr &Root,
1252 const MachineInstr &Prev) const;
1253
1254 /// The limit on resource length extension we accept in MachineCombiner Pass.
1255 virtual int getExtendResourceLenLimit() const { return 0; }
1256
1257 /// This is an architecture-specific helper function of reassociateOps.
1258 /// Set special operand attributes for new instructions after reassociation.
1259 virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
1260 MachineInstr &NewMI1,
1261 MachineInstr &NewMI2) const {}
1262
1263 /// Return true when a target supports MachineCombiner.
1264 virtual bool useMachineCombiner() const { return false; }
1265
1266 /// Return a strategy that MachineCombiner must use when creating traces.
1268
1269 /// Return true if the given SDNode can be copied during scheduling
1270 /// even if it has glue.
1271 virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }
1272
1273protected:
1274 /// Target-dependent implementation for foldMemoryOperand.
1275 /// Target-independent code in foldMemoryOperand will
1276 /// take care of adding a MachineMemOperand to the newly created instruction.
1277 /// The instruction and any auxiliary instructions necessary will be inserted
1278 /// at InsertPt.
1279 virtual MachineInstr *
1282 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1283 LiveIntervals *LIS = nullptr,
1284 VirtRegMap *VRM = nullptr) const {
1285 return nullptr;
1286 }
1287
1288 /// Target-dependent implementation for foldMemoryOperand.
1289 /// Target-independent code in foldMemoryOperand will
1290 /// take care of adding a MachineMemOperand to the newly created instruction.
1291 /// The instruction and any auxiliary instructions necessary will be inserted
1292 /// at InsertPt.
1295 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1296 LiveIntervals *LIS = nullptr) const {
1297 return nullptr;
1298 }
1299
1300 /// Target-dependent implementation of getRegSequenceInputs.
1301 ///
1302 /// \returns true if it is possible to build the equivalent
1303 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
1304 ///
1305 /// \pre MI.isRegSequenceLike().
1306 ///
1307 /// \see TargetInstrInfo::getRegSequenceInputs.
1309 const MachineInstr &MI, unsigned DefIdx,
1310 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
1311 return false;
1312 }
1313
1314 /// Target-dependent implementation of getExtractSubregInputs.
1315 ///
1316 /// \returns true if it is possible to build the equivalent
1317 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1318 ///
1319 /// \pre MI.isExtractSubregLike().
1320 ///
1321 /// \see TargetInstrInfo::getExtractSubregInputs.
1323 unsigned DefIdx,
1324 RegSubRegPairAndIdx &InputReg) const {
1325 return false;
1326 }
1327
1328 /// Target-dependent implementation of getInsertSubregInputs.
1329 ///
1330 /// \returns true if it is possible to build the equivalent
1331 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
1332 ///
1333 /// \pre MI.isInsertSubregLike().
1334 ///
1335 /// \see TargetInstrInfo::getInsertSubregInputs.
1336 virtual bool
1338 RegSubRegPair &BaseReg,
1339 RegSubRegPairAndIdx &InsertedReg) const {
1340 return false;
1341 }
1342
1343public:
1344 /// unfoldMemoryOperand - Separate a single instruction which folded a load or
1345 /// a store or a load and a store into two or more instruction. If this is
1346 /// possible, returns true as well as the new instructions by reference.
1347 virtual bool
1349 bool UnfoldLoad, bool UnfoldStore,
1350 SmallVectorImpl<MachineInstr *> &NewMIs) const {
1351 return false;
1352 }
1353
1355 SmallVectorImpl<SDNode *> &NewNodes) const {
1356 return false;
1357 }
1358
1359 /// Returns the opcode of the would be new
1360 /// instruction after load / store are unfolded from an instruction of the
1361 /// specified opcode. It returns zero if the specified unfolding is not
1362 /// possible. If LoadRegIndex is non-null, it is filled in with the operand
1363 /// index of the operand which will hold the register holding the loaded
1364 /// value.
1365 virtual unsigned
1366 getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
1367 unsigned *LoadRegIndex = nullptr) const {
// Default: 0 signals that unfolding Opc is not possible on this target.
1368 return 0;
1369 }
1370
1371 /// This is used by the pre-regalloc scheduler to determine if two loads are
1372 /// loading from the same base address. It should only return true if the base
1373 /// pointers are the same and the only differences between the two addresses
1374 /// are the offset. It also returns the offsets by reference.
1375 virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
1376 int64_t &Offset1,
1377 int64_t &Offset2) const {
// Default: unable to prove a shared base pointer, so the pre-RA
// scheduler gets no load-pairing information.
1378 return false;
1379 }
1380
1381 /// This is used by the pre-regalloc scheduler to determine (in conjunction
1382 /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
1383 /// On some targets if two loads are loading from
1384 /// addresses in the same cache line, it's better if they are scheduled
1385 /// together. This function takes two integers that represent the load offsets
1386 /// from the common base address. It returns true if it decides it's desirable
1387 /// to schedule the two loads together. "NumLoads" is the number of loads that
1388 /// have already been scheduled after Load1.
1389 virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
1390 int64_t Offset1, int64_t Offset2,
1391 unsigned NumLoads) const {
// Default: never request that the two loads be scheduled together.
1392 return false;
1393 }
1394
1395 /// Get the base operand and byte offset of an instruction that reads/writes
1396 /// memory. This is a convenience function for callers that are only prepared
1397 /// to handle a single base operand.
1399 const MachineOperand *&BaseOp, int64_t &Offset,
1400 bool &OffsetIsScalable,
1401 const TargetRegisterInfo *TRI) const;
1402
1403 /// Get zero or more base operands and the byte offset of an instruction that
1404 /// reads/writes memory. Note that there may be zero base operands if the
1405 /// instruction accesses a constant address.
1406 /// It returns false if MI does not read/write memory.
1407 /// It returns false if base operands and offset could not be determined.
1408 /// It is not guaranteed to always recognize base operands and offsets in all
1409 /// cases.
1412 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
1413 const TargetRegisterInfo *TRI) const {
1414 return false;
1415 }
1416
1417 /// Return true if the instruction contains a base register and offset. If
1418 /// true, the function also sets the operand position in the instruction
1419 /// for the base register and offset.
1421 unsigned &BasePos,
1422 unsigned &OffsetPos) const {
1423 return false;
1424 }
1425
1426 /// Target dependent implementation to get the values constituting the address
1427 /// MachineInstr that is accessing memory. These values are returned as a
1428 /// struct ExtAddrMode which contains all relevant information to make up the
1429 /// address.
1430 virtual std::optional<ExtAddrMode>
1432 const TargetRegisterInfo *TRI) const {
1433 return std::nullopt;
1434 }
1435
1436 /// Returns true if MI's Def is NullValueReg, and the MI
1437 /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
1438 /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
1439 /// function can return true even if it becomes zero. Specifically cases such as
1440 /// NullValueReg = shl NullValueReg, 63.
1442 const Register NullValueReg,
1443 const TargetRegisterInfo *TRI) const {
1444 return false;
1445 }
1446
1447 /// If the instruction is an increment of a constant value, return the amount.
1448 virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
// Default: MI is not recognized as a constant increment; Value is left
// untouched.
1449 return false;
1450 }
1451
1452 /// Returns true if the two given memory operations should be scheduled
1453 /// adjacent. Note that you have to add:
1454 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1455 /// or
1456 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1457 /// to TargetPassConfig::createMachineScheduler() to have an effect.
1458 ///
1459 /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
1460 /// \p NumLoads is the number of loads that will be in the cluster if this
1461 /// hook returns true.
1462 /// \p NumBytes is the number of bytes that will be loaded from all the
1463 /// clustered loads if this hook returns true.
1466 unsigned NumLoads, unsigned NumBytes) const {
1467 llvm_unreachable("target did not implement shouldClusterMemOps()");
1468 }
1469
1470 /// Reverses the branch condition of the specified condition list,
1471 /// returning false on success and true if it cannot be reversed.
1472 virtual bool
1474 return true;
1475 }
1476
1477 /// Insert a noop into the instruction stream at the specified point.
1478 virtual void insertNoop(MachineBasicBlock &MBB,
1480
1481 /// Insert noops into the instruction stream at the specified point.
1482 virtual void insertNoops(MachineBasicBlock &MBB,
1484 unsigned Quantity) const;
1485
1486 /// Return the noop instruction to use for a noop.
1487 virtual MCInst getNop() const;
1488
1489 /// Return true for post-incremented instructions.
1490 virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }
1491
1492 /// Returns true if the instruction is already predicated.
1493 virtual bool isPredicated(const MachineInstr &MI) const { return false; }
1494
1495 /// Assumes the instruction is already predicated and returns true if the
1496 /// instruction can be predicated again.
1497 virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
// Precondition enforced in debug builds: MI must already be predicated.
1498 assert(isPredicated(MI) && "Instruction is not predicated");
// Default: an already-predicated instruction cannot be predicated again.
1499 return false;
1500 }
1501
1502 // Returns a MIRPrinter comment for this machine operand.
1503 virtual std::string
1505 unsigned OpIdx, const TargetRegisterInfo *TRI) const;
1506
1507 /// Returns true if the instruction is a
1508 /// terminator instruction that has not been predicated.
1509 bool isUnpredicatedTerminator(const MachineInstr &MI) const;
1510
1511 /// Returns true if MI is an unconditional tail call.
1512 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
// Default: targets without tail-call branch folding report false.
1513 return false;
1514 }
1515
1516 /// Returns true if the tail call can be made conditional on BranchCond.
1518 const MachineInstr &TailCall) const {
1519 return false;
1520 }
1521
1522 /// Replace the conditional branch in MBB with a conditional tail call.
1525 const MachineInstr &TailCall) const {
1526 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
1527 }
1528
1529 /// Convert the instruction into a predicated instruction.
1530 /// It returns true if the operation was successful.
1531 virtual bool PredicateInstruction(MachineInstr &MI,
1532 ArrayRef<MachineOperand> Pred) const;
1533
1534 /// Returns true if the first specified predicate
1535 /// subsumes the second, e.g. GE subsumes GT.
1537 ArrayRef<MachineOperand> Pred2) const {
1538 return false;
1539 }
1540
1541 /// If the specified instruction defines any predicate
1542 /// or condition code register(s) used for predication, returns true as well
1543 /// as the definition predicate(s) by reference.
1544 /// SkipDead should be set to false at any point that dead
1545 /// predicate instructions should be considered as being defined.
1546 /// A dead predicate instruction is one that is guaranteed to be removed
1547 /// after a call to PredicateInstruction.
1549 std::vector<MachineOperand> &Pred,
1550 bool SkipDead) const {
1551 return false;
1552 }
1553
1554 /// Return true if the specified instruction can be predicated.
1555 /// By default, this returns true for every instruction with a
1556 /// PredicateOperand.
1557 virtual bool isPredicable(const MachineInstr &MI) const {
// Default: defer to the static MCInstrDesc flag; targets may narrow this
// further (e.g. forbid predicating certain otherwise-predicable ops).
1558 return MI.getDesc().isPredicable();
1559 }
1560
1561 /// Return true if it's safe to move a machine
1562 /// instruction that defines the specified register class.
1563 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
1564 return true;
1565 }
1566
1567 /// Test if the given instruction should be considered a scheduling boundary.
1568 /// This primarily includes labels and terminators.
1569 virtual bool isSchedulingBoundary(const MachineInstr &MI,
1570 const MachineBasicBlock *MBB,
1571 const MachineFunction &MF) const;
1572
1573 /// Measure the specified inline asm to determine an approximation of its
1574 /// length.
1575 virtual unsigned getInlineAsmLength(
1576 const char *Str, const MCAsmInfo &MAI,
1577 const TargetSubtargetInfo *STI = nullptr) const;
1578
1579 /// Allocate and return a hazard recognizer to use for this target when
1580 /// scheduling the machine instructions before register allocation.
1581 virtual ScheduleHazardRecognizer *
1583 const ScheduleDAG *DAG) const;
1584
1585 /// Allocate and return a hazard recognizer to use for this target when
1586 /// scheduling the machine instructions before register allocation.
1587 virtual ScheduleHazardRecognizer *
1589 const ScheduleDAGMI *DAG) const;
1590
1591 /// Allocate and return a hazard recognizer to use for this target when
1592 /// scheduling the machine instructions after register allocation.
1593 virtual ScheduleHazardRecognizer *
1595 const ScheduleDAG *DAG) const;
1596
1597 /// Allocate and return a hazard recognizer to use for by non-scheduling
1598 /// passes.
1599 virtual ScheduleHazardRecognizer *
1601 return nullptr;
1602 }
1603
1604 /// Provide a global flag for disabling the PreRA hazard recognizer that
1605 /// targets may choose to honor.
1606 bool usePreRAHazardRecognizer() const;
1607
1608 /// For a comparison instruction, return the source registers
1609 /// in SrcReg and SrcReg2 if having two register operands, and the value it
1610 /// compares against in CmpValue. Return true if the comparison instruction
1611 /// can be analyzed.
1612 virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
1613 Register &SrcReg2, int64_t &Mask,
1614 int64_t &Value) const {
1615 return false;
1616 }
1617
1618 /// See if the comparison instruction can be converted
1619 /// into something more efficient. E.g., on ARM most instructions can set the
1620 /// flags register, obviating the need for a separate CMP.
1621 virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
1622 Register SrcReg2, int64_t Mask,
1623 int64_t Value,
1624 const MachineRegisterInfo *MRI) const {
1625 return false;
1626 }
1627 virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
1628
1629 /// Try to remove the load by folding it to a register operand at the use.
1630 /// We fold the load instructions if and only if the
1631 /// def and use are in the same BB. We only look at one load and see
1632 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
1633 /// defined by the load we are trying to fold. DefMI returns the machine
1634 /// instruction that defines FoldAsLoadDefReg, and the function returns
1635 /// the machine instruction generated due to folding.
1637 const MachineRegisterInfo *MRI,
1638 Register &FoldAsLoadDefReg,
1639 MachineInstr *&DefMI) const {
1640 return nullptr;
1641 }
1642
1643 /// 'Reg' is known to be defined by a move immediate instruction,
1644 /// try to fold the immediate into the use instruction.
1645 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
1646 /// then the caller may assume that DefMI has been erased from its parent
1647 /// block. The caller may assume that it will not be erased by this
1648 /// function otherwise.
1651 return false;
1652 }
1653
1654 /// Return the number of u-operations the given machine
1655 /// instruction will be decoded to on the target cpu. The itinerary's
1656 /// IssueWidth is the number of microops that can be dispatched each
1657 /// cycle. An instruction with zero microops takes no dispatch resources.
1658 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
1659 const MachineInstr &MI) const;
1660
1661 /// Return true for pseudo instructions that don't consume any
1662 /// machine resources in their current form. These are common cases that the
1663 /// scheduler should consider free, rather than conservatively handling them
1664 /// as instructions with no itinerary.
1665 bool isZeroCost(unsigned Opcode) const {
1666 return Opcode <= TargetOpcode::COPY;
1667 }
1668
1669 virtual int getOperandLatency(const InstrItineraryData *ItinData,
1670 SDNode *DefNode, unsigned DefIdx,
1671 SDNode *UseNode, unsigned UseIdx) const;
1672
1673 /// Compute and return the use operand latency of a given pair of def and use.
1674 /// In most cases, the static scheduling itinerary was enough to determine the
1675 /// operand latency. But it may not be possible for instructions with variable
1676 /// number of defs / uses.
1677 ///
1678 /// This is a raw interface to the itinerary that may be directly overridden
1679 /// by a target. Use computeOperandLatency to get the best estimate of
1680 /// latency.
1681 virtual int getOperandLatency(const InstrItineraryData *ItinData,
1682 const MachineInstr &DefMI, unsigned DefIdx,
1683 const MachineInstr &UseMI,
1684 unsigned UseIdx) const;
1685
1686 /// Compute the instruction latency of a given instruction.
1687 /// If the instruction has higher cost when predicated, it's returned via
1688 /// PredCost.
1689 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
1690 const MachineInstr &MI,
1691 unsigned *PredCost = nullptr) const;
1692
1693 virtual unsigned getPredicationCost(const MachineInstr &MI) const;
1694
1695 virtual int getInstrLatency(const InstrItineraryData *ItinData,
1696 SDNode *Node) const;
1697
1698 /// Return the default expected latency for a def based on its opcode.
1699 unsigned defaultDefLatency(const MCSchedModel &SchedModel,
1700 const MachineInstr &DefMI) const;
1701
1702 /// Return true if this opcode has high latency to its result.
1703 virtual bool isHighLatencyDef(int opc) const { return false; }
1704
1705 /// Compute operand latency between a def of 'Reg'
1706 /// and a use in the current loop. Return true if the target considered
1707 /// it 'high'. This is used by optimization passes such as machine LICM to
1708 /// determine whether it makes sense to hoist an instruction out even in a
1709 /// high register pressure situation.
1710 virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
1711 const MachineRegisterInfo *MRI,
1712 const MachineInstr &DefMI, unsigned DefIdx,
1713 const MachineInstr &UseMI,
1714 unsigned UseIdx) const {
1715 return false;
1716 }
1717
1718 /// Compute operand latency of a def of 'Reg'. Return true
1719 /// if the target considered it 'low'.
1720 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
1721 const MachineInstr &DefMI,
1722 unsigned DefIdx) const;
1723
1724 /// Perform target-specific instruction verification.
1725 virtual bool verifyInstruction(const MachineInstr &MI,
1726 StringRef &ErrInfo) const {
1727 return true;
1728 }
1729
1730 /// Return the current execution domain and bit mask of
1731 /// possible domains for instruction.
1732 ///
1733 /// Some micro-architectures have multiple execution domains, and multiple
1734 /// opcodes that perform the same operation in different domains. For
1735 /// example, the x86 architecture provides the por, orps, and orpd
1736 /// instructions that all do the same thing. There is a latency penalty if a
1737 /// register is written in one domain and read in another.
1738 ///
1739 /// This function returns a pair (domain, mask) containing the execution
1740 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
1741 /// function can be used to change the opcode to one of the domains in the
1742 /// bit mask. Instructions whose execution domain can't be changed should
1743 /// return a 0 mask.
1744 ///
1745 /// The execution domain numbers don't have any special meaning except domain
1746 /// 0 is used for instructions that are not associated with any interesting
1747 /// execution domain.
1748 ///
1749 virtual std::pair<uint16_t, uint16_t>
1751 return std::make_pair(0, 0);
1752 }
1753
1754 /// Change the opcode of MI to execute in Domain.
1755 ///
1756 /// The bit (1 << Domain) must be set in the mask returned from
1757 /// getExecutionDomain(MI).
1758 virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
1759
1760 /// Returns the preferred minimum clearance
1761 /// before an instruction with an unwanted partial register update.
1762 ///
1763 /// Some instructions only write part of a register, and implicitly need to
1764 /// read the other parts of the register. This may cause unwanted stalls
1765 /// preventing otherwise unrelated instructions from executing in parallel in
1766 /// an out-of-order CPU.
1767 ///
1768 /// For example, the x86 instruction cvtsi2ss writes its result to bits
1769 /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
1770 /// the instruction needs to wait for the old value of the register to become
1771 /// available:
1772 ///
1773 /// addps %xmm1, %xmm0
1774 /// movaps %xmm0, (%rax)
1775 /// cvtsi2ss %rbx, %xmm0
1776 ///
1777 /// In the code above, the cvtsi2ss instruction needs to wait for the addps
1778 /// instruction before it can issue, even though the high bits of %xmm0
1779 /// probably aren't needed.
1780 ///
1781 /// This hook returns the preferred clearance before MI, measured in
1782 /// instructions. Other defs of MI's operand OpNum are avoided in the last N
1783 /// instructions before MI. It should only return a positive value for
1784 /// unwanted dependencies. If the old bits of the defined register have
1785 /// useful values, or if MI is determined to otherwise read the dependency,
1786 /// the hook should return 0.
1787 ///
1788 /// The unwanted dependency may be handled by:
1789 ///
1790 /// 1. Allocating the same register for an MI def and use. That makes the
1791 /// unwanted dependency identical to a required dependency.
1792 ///
1793 /// 2. Allocating a register for the def that has no defs in the previous N
1794 /// instructions.
1795 ///
1796 /// 3. Calling breakPartialRegDependency() with the same arguments. This
1797 /// allows the target to insert a dependency breaking instruction.
1798 ///
1799 virtual unsigned
1801 const TargetRegisterInfo *TRI) const {
1802 // The default implementation returns 0 for no partial register dependency.
1803 return 0;
1804 }
1805
1806 /// Return the minimum clearance before an instruction that reads an
1807 /// unused register.
1808 ///
1809 /// For example, AVX instructions may copy part of a register operand into
1810 /// the unused high bits of the destination register.
1811 ///
1812 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
1813 ///
1814 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
1815 /// false dependence on any previous write to %xmm0.
1816 ///
1817 /// This hook works similarly to getPartialRegUpdateClearance, except that it
1818 /// does not take an operand index. Instead sets \p OpNum to the index of the
1819 /// unused register.
1820 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
1821 const TargetRegisterInfo *TRI) const {
1822 // The default implementation returns 0 for no undef register dependency.
1823 return 0;
1824 }
1825
1826 /// Insert a dependency-breaking instruction
1827 /// before MI to eliminate an unwanted dependency on OpNum.
1828 ///
1829 /// If it wasn't possible to avoid a def in the last N instructions before MI
1830 /// (see getPartialRegUpdateClearance), this hook will be called to break the
1831 /// unwanted dependency.
1832 ///
1833 /// On x86, an xorps instruction can be used as a dependency breaker:
1834 ///
1835 /// addps %xmm1, %xmm0
1836 /// movaps %xmm0, (%rax)
1837 /// xorps %xmm0, %xmm0
1838 /// cvtsi2ss %rbx, %xmm0
1839 ///
1840 /// An <imp-kill> operand should be added to MI if an instruction was
1841 /// inserted. This ties the instructions together in the post-ra scheduler.
1842 ///
1843 virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
1844 const TargetRegisterInfo *TRI) const {}
1845
1846 /// Create machine specific model for scheduling.
1847 virtual DFAPacketizer *
1849 return nullptr;
1850 }
1851
1852 /// Sometimes, it is possible for the target
1853 /// to tell, even without aliasing information, that two MIs access different
1854 /// memory addresses. This function returns true if two MIs access different
1855 /// memory addresses and false otherwise.
1856 ///
1857 /// Assumes any physical registers used to compute addresses have the same
1858 /// value for both instructions. (This is the most useful assumption for
1859 /// post-RA scheduling.)
1860 ///
1861 /// See also MachineInstr::mayAlias, which is implemented on top of this
1862 /// function.
1863 virtual bool
1865 const MachineInstr &MIb) const {
1866 assert(MIa.mayLoadOrStore() &&
1867 "MIa must load from or modify a memory location");
1868 assert(MIb.mayLoadOrStore() &&
1869 "MIb must load from or modify a memory location");
1870 return false;
1871 }
1872
1873 /// Return the value to use for the MachineCSE's LookAheadLimit,
1874 /// which is a heuristic used for CSE'ing phys reg defs.
1875 virtual unsigned getMachineCSELookAheadLimit() const {
1876 // The default lookahead is small to prevent unprofitable quadratic
1877 // behavior.
1878 return 5;
1879 }
1880
1881 /// Return the maximal number of alias checks on memory operands. For
1882 /// instructions with more than one memory operands, the alias check on a
1883 /// single MachineInstr pair has quadratic overhead and results in
1884 /// unacceptable performance in the worst case. The limit here is to clamp
1885 /// that maximal checks performed. Usually, that's the product of memory
1886 /// operand numbers from that pair of MachineInstr to be checked. For
1887 /// instance, with two MachineInstrs with 4 and 5 memory operands
1888 /// correspondingly, a total of 20 checks are required. With this limit set to
1889 /// 16, their alias check is skipped. We choose to limit the product instead
1890 /// of the individual instruction as targets may have special MachineInstrs
1891 /// with a considerably high number of memory operands, such as `ldm` in ARM.
1892 /// Setting this limit per MachineInstr would result in either too high
1893 /// overhead or too rigid restriction.
1894 virtual unsigned getMemOperandAACheckLimit() const { return 16; }
1895
1896 /// Return an array that contains the ids of the target indices (used for the
1897 /// TargetIndex machine operand) and their names.
1898 ///
1899 /// MIR Serialization is able to serialize only the target indices that are
1900 /// defined by this method.
1903 return std::nullopt;
1904 }
1905
1906 /// Decompose the machine operand's target flags into two values - the direct
1907 /// target flag value and any of bit flags that are applied.
1908 virtual std::pair<unsigned, unsigned>
1910 return std::make_pair(0u, 0u);
1911 }
1912
1913 /// Return an array that contains the direct target flag values and their
1914 /// names.
1915 ///
1916 /// MIR Serialization is able to serialize only the target flags that are
1917 /// defined by this method.
1920 return std::nullopt;
1921 }
1922
1923 /// Return an array that contains the bitmask target flag values and their
1924 /// names.
1925 ///
1926 /// MIR Serialization is able to serialize only the target flags that are
1927 /// defined by this method.
1930 return std::nullopt;
1931 }
1932
1933 /// Return an array that contains the MMO target flag values and their
1934 /// names.
1935 ///
1936 /// MIR Serialization is able to serialize only the MMO target flags that are
1937 /// defined by this method.
1940 return std::nullopt;
1941 }
1942
1943 /// Determines whether \p Inst is a tail call instruction. Override this
1944 /// method on targets that do not properly set MCID::Return and MCID::Call on
1945 /// tail call instructions."
1946 virtual bool isTailCall(const MachineInstr &Inst) const {
1947 return Inst.isReturn() && Inst.isCall();
1948 }
1949
1950 /// True if the instruction is bound to the top of its basic block and no
1951 /// other instructions shall be inserted before it. This can be implemented
1952 /// to prevent register allocator to insert spills before such instructions.
1953 virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
1954 return false;
1955 }
1956
1957 /// During PHI eleimination lets target to make necessary checks and
1958 /// insert the copy to the PHI destination register in a target specific
1959 /// manner.
1962 const DebugLoc &DL, Register Src, Register Dst) const {
1963 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
1964 .addReg(Src);
1965 }
1966
1967 /// During PHI eleimination lets target to make necessary checks and
1968 /// insert the copy to the PHI destination register in a target specific
1969 /// manner.
1972 const DebugLoc &DL, Register Src,
1973 unsigned SrcSubReg,
1974 Register Dst) const {
1975 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
1976 .addReg(Src, 0, SrcSubReg);
1977 }
1978
1979 /// Returns a \p outliner::OutlinedFunction struct containing target-specific
1980 /// information for a set of outlining candidates. Returns std::nullopt if the
1981 /// candidates are not suitable for outlining.
1982 virtual std::optional<outliner::OutlinedFunction> getOutliningCandidateInfo(
1983 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
1985 "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
1986 }
1987
1988 /// Optional target hook to create the LLVM IR attributes for the outlined
1989 /// function. If overridden, the overriding function must call the default
1990 /// implementation.
1992 Function &F, std::vector<outliner::Candidate> &Candidates) const;
1993
1994protected:
1995 /// Target-dependent implementation for getOutliningTypeImpl.
1996 virtual outliner::InstrType
1999 "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
2000 }
2001
2002public:
2003 /// Returns how or if \p MIT should be outlined. \p Flags is the
2004 /// target-specific information returned by isMBBSafeToOutlineFrom.
2007
2008 /// Optional target hook that returns true if \p MBB is safe to outline from,
2009 /// and returns any target-specific information in \p Flags.
2011 unsigned &Flags) const;
2012
2013 /// Optional target hook which partitions \p MBB into outlinable ranges for
2014 /// instruction mapping purposes. Each range is defined by two iterators:
2015 /// [start, end).
2016 ///
2017 /// Ranges are expected to be ordered top-down. That is, ranges closer to the
2018 /// top of the block should come before ranges closer to the end of the block.
2019 ///
2020 /// Ranges cannot overlap.
2021 ///
2022 /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end())
2023 ///
2024 /// All instructions not present in an outlinable range are considered
2025 /// illegal.
2026 virtual SmallVector<
2027 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
2029 return {std::make_pair(MBB.begin(), MBB.end())};
2030 }
2031
2032 /// Insert a custom frame for outlined functions.
2034 const outliner::OutlinedFunction &OF) const {
2036 "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
2037 }
2038
2039 /// Insert a call to an outlined function into the program.
2040 /// Returns an iterator to the spot where we inserted the call. This must be
2041 /// implemented by the target.
2045 outliner::Candidate &C) const {
2047 "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
2048 }
2049
2050 /// Return true if the function can safely be outlined from.
2051 /// A function \p MF is considered safe for outlining if an outlined function
2052 /// produced from instructions in F will produce a program which produces the
2053 /// same output for any set of given inputs.
2055 bool OutlineFromLinkOnceODRs) const {
2056 llvm_unreachable("Target didn't implement "
2057 "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
2058 }
2059
2060 /// Return true if the function should be outlined from by default.
2062 return false;
2063 }
2064
2065 /// Produce the expression describing the \p MI loading a value into
2066 /// the physical register \p Reg. This hook should only be used with
2067 /// \p MIs belonging to VReg-less functions.
2068 virtual std::optional<ParamLoadedValue>
2070
2071 /// Given the generic extension instruction \p ExtMI, returns true if this
2072 /// extension is a likely candidate for being folded into an another
2073 /// instruction.
2075 MachineRegisterInfo &MRI) const {
2076 return false;
2077 }
2078
2079 /// Return MIR formatter to format/parse MIR operands. Target can override
2080 /// this virtual function and return target specific MIR formatter.
2081 virtual const MIRFormatter *getMIRFormatter() const {
2082 if (!Formatter.get())
2083 Formatter = std::make_unique<MIRFormatter>();
2084 return Formatter.get();
2085 }
2086
2087 /// Returns the target-specific default value for tail duplication.
2088 /// This value will be used if the tail-dup-placement-threshold argument is
2089 /// not provided.
2090 virtual unsigned getTailDuplicateSize(CodeGenOpt::Level OptLevel) const {
2091 return OptLevel >= CodeGenOpt::Aggressive ? 4 : 2;
2092 }
2093
2094 /// Returns the callee operand from the given \p MI.
2095 virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
2096 return MI.getOperand(0);
2097 }
2098
2099 /// Return the uniformity behavior of the given instruction.
2100 virtual InstructionUniformity
2103 }
2104
2105 /// Returns true if the given \p MI defines a TargetIndex operand that can be
2106 /// tracked by their offset, can have values, and can have debug info
2107 /// associated with it. If so, sets \p Index and \p Offset of the target index
2108 /// operand.
2110 int64_t &Offset) const {
2111 return false;
2112 }
2113
2114private:
2115 mutable std::unique_ptr<MIRFormatter> Formatter;
2116 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
2117 unsigned CatchRetOpcode;
2118 unsigned ReturnOpcode;
2119};
2120
2121/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
2124
2126 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
2127 RegInfo::getEmptyKey());
2128 }
2129
2131 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
2132 RegInfo::getTombstoneKey());
2133 }
2134
2135 /// Reuse getHashValue implementation from
2136 /// std::pair<unsigned, unsigned>.
2137 static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
2138 std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
2139 return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
2140 }
2141
2144 return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
2145 RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
2146 }
2147};
2148
2149} // end namespace llvm
2150
2151#endif // LLVM_CODEGEN_TARGETINSTRINFO_H
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
SmallVector< MachineOperand, 4 > Cond
This file defines DenseMapInfo traits for DenseMap.
This file defines the DenseMap class.
uint64_t Size
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Machine Check Debug Module
Contains all data structures shared between the outliner implemented in MachineOutliner....
unsigned const TargetRegisterInfo * TRI
unsigned Reg
#define P(N)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
@ Flags
Definition: TextStubV5.cpp:93
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:701
A debug info location.
Definition: DebugLoc.h:33
Itinerary data supplied by a subtarget to be used by a target.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition: MCAsmInfo.h:56
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
MIRFormater - Interface to format MIR operand based on target.
Definition: MIRFormatter.h:28
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
bool isReturn(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:894
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:904
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Represents one node in the SelectionDAG.
This class represents the scheduled code.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
virtual void disposed()=0
Called when the loop is being removed.
virtual void adjustTripCount(int TripCountAdjust)=0
Modify the loop such that the trip count is OriginalTC + TripCountAdjust.
virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const =0
Return true if the given instruction should not be pipelined and should be ignored.
virtual void setPreheader(MachineBasicBlock *NewPreheader)=0
Called when the loop's preheader has been modified to NewPreheader.
virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
Return true if the proposed schedule should used.
virtual std::optional< bool > createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond)=0
Create a condition to determine if the trip count of the loop is greater than TC, where TC is always ...
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual SmallVector< std::pair< MachineBasicBlock::iterator, MachineBasicBlock::iterator > > getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook which partitions MBB into outlinable ranges for instruction mapping purposes.
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const
Return true if it's profitable to predicate instructions with accumulated instruction latency of "Num...
virtual bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const
Reverses the branch condition of the specified condition list, returning false on success and true if...
virtual MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI) const
Try to remove the load by folding it to a register operand at the use.
virtual unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const
Remove the branching code at the end of the specific MBB.
virtual std::unique_ptr< PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
virtual bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const
If the specified instruction defines any predicate or condition code register(s) used for predication...
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct store to a stack slot, return the virtual or physica...
virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const
'Reg' is known to be defined by a move immediate instruction, try to fold the immediate into the use ...
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const
Assumes the instruction is already predicated and returns true if the instruction can be predicated a...
virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, const MachineInstr &DefMI, unsigned DefIdx) const
Compute operand latency of a def of 'Reg'.
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const
This is an architecture-specific helper function of reassociateOps.
bool isZeroCost(unsigned Opcode) const
Return true for pseudo instructions that don't consume any machine resources in their current form.
virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const
Analyze the branching code at the end of MBB, returning true if it cannot be understood (e....
virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, MachineRegisterInfo &MRI) const
Given the generic extension instruction ExtMI, returns true if this extension is a likely candidate f...
virtual unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Returns the preferred minimum clearance before an instruction with an unwanted partial register updat...
virtual bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, const TargetRegisterInfo *TRI) const
Get zero or more base operands and the byte offset of an instruction that reads/writes memory.
virtual bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Returns true if the tail call can be made conditional on BranchCond.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const
Return the number of u-operations the given machine instruction will be decoded to on the target cpu.
virtual DFAPacketizer * CreateTargetScheduleState(const TargetSubtargetInfo &) const
Create machine specific model for scheduling.
virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, MachineBasicBlock &PreHeader, MachineInstr *IndVar, MachineInstr &Cmp, SmallVectorImpl< MachineOperand > &Cond, SmallVectorImpl< MachineInstr * > &PrevInsts, unsigned Iter, unsigned MaxIter) const
Generate code to reduce the loop iteration by one and check if the loop is finished.
virtual bool isPostIncrement(const MachineInstr &MI) const
Return true for post-incremented instructions.
bool isTriviallyReMaterializable(const MachineInstr &MI) const
Return true if the instruction is trivially rematerializable, meaning it has no side effects and requ...
virtual int getSPAdjust(const MachineInstr &MI) const
Returns the actual stack pointer adjustment made by an instruction as part of a call sequence.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const
Load the specified register of the given register class from the specified stack frame index.
virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const
Return true if the instruction is a "coalescable" extension instruction.
virtual void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset=0, RegScavenger *RS=nullptr) const
Insert an unconditional indirect branch at the end of MBB to NewDestBB.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const
Delete the instruction OldInst and everything after it, replacing it with an unconditional branch to ...
virtual ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const
Return an array that contains the MMO target flag values and their names.
virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, unsigned &BasePos, unsigned &OffsetPos) const
Return true if the instruction contains a base register and offset.
virtual bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const
Convert the instruction into a predicated instruction.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const
Returns the opcode of the would be new instruction after load / store are unfolded from an instructio...
bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const
Return true when \P Opcode1 or its inversion is equal to \P Opcode2.
virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify=false) const
Analyze the branching code at the end of MBB and parse it into the MachineBranchPredicate structure i...
virtual bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Target-dependent implementation of getInsertSubregInputs.
virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const
Return true if the function should be outlined from by default.
virtual MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &NewMIs, bool PreferFalse=false) const
Given a select instruction that was understood by analyzeSelect and returned Optimizable = true,...
virtual const MIRFormatter * getMIRFormatter() const
Return MIR formatter to format/parse MIR operands.
virtual std::pair< unsigned, unsigned > getPatchpointUnfoldableRange(const MachineInstr &MI) const
For a patchpoint, stackmap, or statepoint intrinsic, return the range of operands which can't be fold...
outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const
Returns how or if MIT should be outlined.
virtual bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const
Return true if target supports reassociation of instructions in machine combiner pass to reduce regis...
virtual ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const
Return an array that contains the ids of the target indices (used for the TargetIndex machine operand...
virtual int getOperandLatency(const InstrItineraryData *ItinData, SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const
virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Return the minimum clearance before an instruction that reads an unused register.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const
Store the specified register of the given register class to the specified stack frame index.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const
Optional target hook to create the LLVM IR attributes for the outlined function.
virtual bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const
Returns true if MI's Def is NullValueReg, and the MI does not change the Zero value.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const
Perform target-specific instruction verification.
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const
Returns true if MI is an unconditional tail call.
bool isUnpredicatedTerminator(const MachineInstr &MI) const
Returns true if the instruction is a terminator instruction that has not been predicated.
virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const
Compute operand latency between a def of 'Reg' and a use in the current loop.
bool isUnspillableTerminator(const MachineInstr *MI) const
Return true if the given instruction is a terminator that is unspillable, according to isUnspillableTer...
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const
Return true if it's profitable to unpredicate one side of a 'diamond', i.e.
virtual bool useMachineCombiner() const
Return true when a target supports MachineCombiner.
virtual void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
Insert a noop into the instruction stream at the specified point.
virtual bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const
Returns true if the first specified predicate subsumes the second, e.g.
bool isFrameInstr(const MachineInstr &I) const
Returns true if the argument is a frame pseudo instruction.
virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const
Insert a dependency-breaking instruction before MI to eliminate an unwanted dependency on OpNum.
virtual bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Target-dependent implementation of getRegSequenceInputs.
virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB, unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability) const
Second variant of isProfitableToIfCvt.
virtual int getExtendResourceLenLimit() const
The limit on resource length extension we accept in MachineCombiner Pass.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const
Allocate and return a hazard recognizer for use by non-scheduling passes.
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true,...
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, unsigned &Size, unsigned &Offset, const MachineFunction &MF) const
Compute the size in bytes and offset within a stack slot of a spilled register or subregister.
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a store to a stack slot, return true along with the FrameInd...
virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const
Emit instructions to copy a pair of physical registers.
virtual bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const
Sometimes, it is possible for the target to tell, even without aliasing information,...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const
unsigned getReturnOpcode() const
virtual unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isStoreToStackSlot that returns the number of bytes stored to the stack.
virtual unsigned getInlineAsmLength(const char *Str, const MCAsmInfo &MAI, const TargetSubtargetInfo *STI=nullptr) const
Measure the specified inline asm to determine an approximation of its length.
virtual outliner::InstrType getOutliningTypeImpl(MachineBasicBlock::iterator &MIT, unsigned Flags) const
Target-dependent implementation for getOutliningTypeImpl.
virtual bool isIgnorableUse(const MachineOperand &MO) const
Given MO is a PhysReg use return if it can be ignored for the purpose of instruction rematerializatio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
MachineInstr * foldMemoryOperand(MachineInstr &MI, ArrayRef< unsigned > Ops, int FI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Attempt to fold a load or store of the specified stack slot into the specified machine instruction fo...
virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const
See if the comparison instruction can be converted into something more efficient.
virtual unsigned getMemOperandAACheckLimit() const
Return the maximal number of alias checks on memory operands.
virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const
Return true if the function can safely be outlined from.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const
Insert a custom frame for outlined functions.
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const
This is used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const
Insert branch code into the end of the specified MachineBasicBlock.
virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const
Return true if the given SDNode can be copied during scheduling even if it has glue.
virtual std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const
Target dependent implementation to get the values constituting the address MachineInstr that is acces...
virtual std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const
Target-dependent implementation for IsCopyInstr.
virtual MachineInstr * createPHIDestinationCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const
Returns true if MI is an instruction that defines Reg to have a constant value and the value is recor...
static bool isGenericOpcode(unsigned Opc)
TargetInstrInfo & operator=(const TargetInstrInfo &)=delete
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const
Return an array that contains the bitmask target flag values and their names.
unsigned getCallFrameSetupOpcode() const
These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).
virtual bool isSubregFoldable() const
Check whether the target can fold a load that feeds a subreg operand (or a subreg operand that feeds ...
virtual bool isBasicBlockPrologue(const MachineInstr &MI) const
True if the instruction is bound to the top of its basic block and no other instructions shall be ins...
virtual MCInst getNop() const
Return the noop instruction to use for a noop.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex, unsigned &MemBytes) const
Optional extension of isLoadFromStackSlot that returns the number of bytes loaded from the stack.
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const
Return the current execution domain and bit mask of possible domains for instruction.
virtual bool optimizeCondBranch(MachineInstr &MI) const
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, MachineInstr *&CmpInst) const
Analyze the loop code, return true if it cannot be understood.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
unsigned getCatchReturnOpcode() const
virtual InstructionUniformity getInstructionUniformity(const MachineInstr &MI) const
Return the uniformity behavior of the given instruction.
virtual bool isAsCheapAsAMove(const MachineInstr &MI) const
Return true if the instruction is as cheap as a move instruction.
virtual bool isTailCall(const MachineInstr &Inst) const
Determines whether Inst is a tail call instruction.
virtual const MachineOperand & getCalleeOperand(const MachineInstr &MI) const
Returns the callee operand from the given MI.
int64_t getFrameTotalSize(const MachineInstr &I) const
Returns the total frame size, which is made up of the space set up inside the pair of frame start-sto...
MachineInstr * commuteInstruction(MachineInstr &MI, bool NewMI=false, unsigned OpIdx1=CommuteAnyOperandIndex, unsigned OpIdx2=CommuteAnyOperandIndex) const
This method commutes the operands of the given machine instruction MI.
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const
unfoldMemoryOperand - Separate a single instruction which folded a load or a store or a load and a st...
virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const
Return true if the specified machine instruction is a copy of one stack slot to another and has no ot...
virtual int getJumpTableIndex(const MachineInstr &MI) const
Return an index for MachineJumpTableInfo if insn is an indirect jump using a jump table,...
virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert=false) const
Return true when \P Inst is both associative and commutative.
virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index, int64_t &Offset) const
Returns true if the given MI defines a TargetIndex operand that can be tracked by their offset,...
virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const
Allow targets to tell MachineVerifier whether a specific register MachineOperand can be used as part ...
virtual void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const
Re-issue the specified 'original' instruction at the specific location targeting a new destination re...
virtual MachineInstr * createPHISourceCopy(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const
During PHI elimination, lets the target make necessary checks and insert the copy to the PHI destinati...
virtual MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const
Insert a call to an outlined function into the program.
virtual std::optional< unsigned > getInverseOpcode(unsigned Opcode) const
Return the inverse operation opcode if it exists for \P Opcode (e.g.
TargetInstrInfo(unsigned CFSetupOpcode=~0u, unsigned CFDestroyOpcode=~0u, unsigned CatchRetOpcode=~0u, unsigned ReturnOpcode=~0u)
virtual void insertNoops(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Quantity) const
Insert noops into the instruction stream at the specified point.
unsigned getCallFrameDestroyOpcode() const
int64_t getFrameSize(const MachineInstr &I) const
Returns size of the frame associated with the given frame instruction.
virtual MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const
virtual bool isPredicated(const MachineInstr &MI) const
Returns true if the instruction is already predicated.
bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const
Replace the conditional branch in MBB with a conditional tail call.
TargetInstrInfo(const TargetInstrInfo &)=delete
virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const
Return true when a code sequence can improve throughput.
virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const
Return an estimate for the code size reduction (in bytes) which will be caused by removing the given ...
virtual ~TargetInstrInfo()
virtual std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const
Returns an outliner::OutlinedFunction struct containing target-specific information for a set of outli...
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const
Compute the instruction latency of a given instruction.
virtual bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI=nullptr) const
Return true if two machine instructions would produce identical values.
virtual unsigned getTailDuplicateSize(CodeGenOpt::Level OptLevel) const
Returns the target-specific default value for tail duplication.
bool isFrameSetup(const MachineInstr &I) const
Returns true if the argument is a frame setup pseudo instruction.
virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const
Return the increase in code size needed to predicate a contiguous run of NumInsts instructions.
virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const
When calculate the latency of the root instruction, accumulate the latency of the sequence to the roo...
std::optional< DestSourcePair > isCopyInstr(const MachineInstr &MI) const
If the specific machine instruction is an instruction that moves/copies value from one register to ano...
bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const
Return true if the input \P Inst is part of a chain of dependent ops that are suitable for reassociat...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const
Analyze the given select instruction, returning true if it cannot be understood.
std::pair< unsigned, unsigned > getReassociationOpcodes(MachineCombinerPattern Pattern, const MachineInstr &Root, const MachineInstr &Prev) const
Reassociation of some instructions requires inverse operations (e.g.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const
Returns the size in bytes of the specified MachineInstr, or ~0U when this function is not implemented...
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const
Return true if it's profitable for if-converter to duplicate instructions of specified accumulated in...
virtual bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2, unsigned NumLoads, unsigned NumBytes) const
Returns true if the two given memory operations should be scheduled adjacent.
virtual unsigned getPredicationCost(const MachineInstr &MI) const
virtual bool shouldSink(const MachineInstr &MI) const
Return true if the instruction should be sunk by MachineSink.
virtual MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const
This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_ADDR flag.
virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const
Change the opcode of MI to execute in Domain.
virtual bool isPredicable(const MachineInstr &MI) const
Return true if the specified instruction can be predicated.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned) const
Decompose the machine operand's target flags into two values - the direct target flag value and any o...
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const
Return true if it's safe to move a machine instruction that defines the specified register class.
virtual bool canInsertSelect(const MachineBasicBlock &MBB, ArrayRef< MachineOperand > Cond, Register DstReg, Register TrueReg, Register FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const
Return true if it is possible to insert a select instruction that chooses between TrueReg and FalseRe...
virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const
Return a strategy that MachineCombiner must use when creating traces.
bool getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
virtual bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const
If the specified machine instruction has a load from a stack slot, return true along with the FrameIn...
virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const
Return true if the given terminator MI is not expected to spill.
virtual std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const
If the specific machine instruction is an instruction that adds an immediate value and a physical reg...
unsigned defaultDefLatency(const MCSchedModel &SchedModel, const MachineInstr &DefMI) const
Return the default expected latency for a def based on its opcode.
static bool isGenericAtomicRMWOpcode(unsigned Opc)
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const
Returns true if the target has a preference on the operands order of the given machine instruction.
static const unsigned CommuteAnyOperandIndex
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const
Check for post-frame ptr elimination stack locations as well.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
void reassociateOps(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
Attempt to reassociate \P Root and \P Prev according to \P Pattern to reduce critical path length.
virtual bool isHighLatencyDef(int opc) const
Return true if this opcode has high latency to its result.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool expandPostRAPseudo(MachineInstr &MI) const
This function is called for all pseudo instructions that remain after register allocation.
static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, unsigned CommutableOpIdx1, unsigned CommutableOpIdx2)
Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable operand indices to (ResultIdx1,...
virtual ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const
Return an array that contains the direct target flag values and their names.
virtual bool shouldHoist(const MachineInstr &MI, const MachineLoop *FromLoop) const
Return false if the instruction should not be hoisted by MachineLICM.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const
Target-dependent implementation of getExtractSubregInputs.
bool usePreRAHazardRecognizer() const
Provide a global flag for disabling the PreRA hazard recognizer that targets may choose to honor.
unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, MachineBasicBlock *DestBB, const DebugLoc &DL, int *BytesAdded=nullptr) const
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const
If the instruction is an increment of a constant value, return the amount.
virtual void finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, SmallVectorImpl< MachineInstr * > &InsInstrs) const
Fix up the placeholder we may add in genAlternativeCodeSequence().
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS=nullptr) const
Target-dependent implementation for foldMemoryOperand.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const
This is used by the pre-regalloc scheduler to determine if two loads are loading from the same base a...
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, SmallVectorImpl< SDNode * > &NewNodes) const
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI, const MachineFunction &MF) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const
For a comparison instruction, return the source registers in SrcReg and SrcReg2 if having two registe...
virtual unsigned getMachineCSELookAheadLimit() const
Return the value to use for the MachineCSE's LookAheadLimit, which is a heuristic used for CSE'ing ph...
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const
Return true if it's legal to split the given basic block at the specified instruction (i....
bool getMemOperandWithOffset(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const
Get the base operand and byte offset of an instruction that reads/writes memory.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
Provide an instruction scheduling machine model to CodeGen passes.
TargetSubtargetInfo - Generic base class for all target subtargets.
LLVM Value Representation.
Definition: Value.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition: CallingConv.h:76
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Level
Code generation optimization level.
Definition: CodeGen.h:57
@ Aggressive
-O3
Definition: CodeGen.h:61
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
MachineTraceStrategy
Strategies for selecting traces.
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
InstructionUniformity
Enum describing how instructions behave with respect to uniformity and divergence, to determine whether an instruction's result is uniform or divergent.
Definition: Uniformity.h:18
@ Default
The result values are uniform if and only if all operands are uniform.
#define N
static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val)
Reuse getHashValue implementation from std::pair<unsigned, unsigned>.
static TargetInstrInfo::RegSubRegPair getTombstoneKey()
static TargetInstrInfo::RegSubRegPair getEmptyKey()
static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, const TargetInstrInfo::RegSubRegPair &RHS)
An information struct used to provide DenseMap with the various necessary components for a given value type T.
Definition: DenseMapInfo.h:51
const MachineOperand * Source
DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
const MachineOperand * Destination
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
Machine model for scheduling, bundling, and heuristics.
Definition: MCSchedule.h:256
Used to describe a register and immediate addition.
RegImmPair(Register Reg, int64_t Imm)
Represents a predicate at the MachineFunction level.
bool SingleUseCondition
SingleUseCondition is true if ConditionDef is dead except for the branch(es) at the end of the basic block.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
RegSubRegPairAndIdx(Register Reg=Register(), unsigned SubReg=0, unsigned SubIdx=0)
A pair composed of a register and a sub-register index.
bool operator==(const RegSubRegPair &P) const
RegSubRegPair(Register Reg=Register(), unsigned SubReg=0)
bool operator!=(const RegSubRegPair &P) const
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.