//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/TypeSize.h"
#include <optional>

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                             Register &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if it has an unscaled load/store offset.
  static bool hasUnscaledLdStOffset(unsigned Opc);
  static bool hasUnscaledLdStOffset(MachineInstr &MI) {
    return hasUnscaledLdStOffset(MI.getOpcode());
  }
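
  // Illustrative sketch (not part of the upstream comments): AArch64 integer
  // load/stores come in scaled-immediate and unscaled-immediate forms, e.g.
  // LDRXui (offset is a multiple of 8) vs. LDURXi (signed byte offset), so:
  //
  //   AArch64InstrInfo::hasUnscaledLdStOffset(AArch64::LDRXui);  // false
  //   AArch64InstrInfo::hasUnscaledLdStOffset(AArch64::LDURXi);  // true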

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns whether the instruction is a pre-indexed load.
  static bool isPreLd(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed store.
  static bool isPreSt(const MachineInstr &MI);

  /// Returns whether the instruction is a pre-indexed load/store.
  static bool isPreLdSt(const MachineInstr &MI);

  /// Returns whether the instruction is a paired load/store.
  static bool isPairedLdSt(const MachineInstr &MI);

  /// Returns the base register operand of a load/store.
  static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);

  /// Returns the immediate offset operand of a load/store.
  static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);

  /// Returns whether the instruction is FP or NEON.
  static bool isFpOrNEON(const MachineInstr &MI);

  /// Returns whether the instruction is in H form (16 bit operands)
  static bool isHForm(const MachineInstr &MI);

  /// Returns whether the instruction is in Q form (128 bit operands)
  static bool isQForm(const MachineInstr &MI);

  /// Returns whether the instruction can be compatible with non-zero BTYPE.
  static bool hasBTISemantics(const MachineInstr &MI);

  /// Returns the index for the immediate for a given instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Returns true if MI is one of the TCRETURN* instructions.
  static bool isTailCallReturnInst(const MachineInstr &MI);

  /// Return the opcode that sets flags when possible. The caller is
  /// responsible for ensuring the opc has a flag setting equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc);
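
  // Illustrative sketch (hypothetical usage): the flag-setting equivalent of
  // an arithmetic opcode is its S-suffixed form, e.g. ADDWri -> ADDSWri and
  // SUBXrr -> SUBSXrr, so a caller that needs NZCV defined might do:
  //
  //   unsigned FlagSettingOpc =
  //       AArch64InstrInfo::convertToFlagSettingOpc(AArch64::ADDWri);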

  /// Return true if this is a load/store that can be potentially paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const override;

  bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
                           const MachineInstr &AddrI,
                           ExtAddrMode &AM) const override;

  MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
                                 const ExtAddrMode &AM) const override;

  bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const override;

  /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
  /// This is true for some SVE instructions like ldr/str that have a
  /// 'reg + imm' addressing mode where the immediate is an index to the
  /// scalable vector located at 'reg + imm * vscale x #bytes'.
  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, bool &OffsetIsScalable,
                                    TypeSize &Width,
                                    const TargetRegisterInfo *TRI) const;
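
  // Illustrative sketch (hypothetical numbers): for an SVE fill such as
  // "ldr z0, [x0, #3, mul vl]" one would expect BaseOp = x0, Offset = 3 and
  // OffsetIsScalable = true; on a machine with 256-bit vectors (vscale = 2,
  // 16 bytes per vscale unit) the effective address is
  // x0 + 3 * 2 * 16 = x0 + 96 bytes.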

  /// Return the immediate offset of the base register in a load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
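
  // Illustrative sketch (hypothetical usage; the expected values are an
  // assumption based on the LDRXui encoding, a 12-bit unsigned offset scaled
  // by 8):
  //
  //   TypeSize Scale = TypeSize::getFixed(0), Width = TypeSize::getFixed(0);
  //   int64_t MinOffset, MaxOffset;
  //   if (AArch64InstrInfo::getMemOpInfo(AArch64::LDRXui, Scale, Width,
  //                                      MinOffset, MaxOffset)) {
  //     // Expect Scale == 8, Width == 8, MinOffset == 0, MaxOffset == 4095.
  //   }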

  /// Detect opportunities for ldp/stp formation.
  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2,
                           unsigned ClusterSize,
                           unsigned NumBytes) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, Register SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI,
                           Register VReg) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI,
                            Register VReg) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                                      ArrayRef<unsigned> Ops,
                                      MachineBasicBlock::iterator InsertPt,
                                      int FrameIndex,
                                      LiveIntervals *LIS = nullptr,
                                      VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch from an instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;
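
  // Illustrative sketch (offsets are architectural branch ranges, not taken
  // from this header): TBZ/TBNZ encode a 14-bit scaled offset (about +/-32
  // KiB), Bcc/CBZ/CBNZ a 19-bit one (about +/-1 MiB) and B a 26-bit one
  // (about +/-128 MiB), so e.g.
  //
  //   TII->isBranchOffsetInRange(AArch64::TBZW, 1 << 20);
  //
  // would be expected to return false, while the same offset is fine for B.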

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  void insertIndirectBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock &NewDestBB,
                            MachineBasicBlock &RestoreBB, const DebugLoc &DL,
                            int64_t BrOffset, RegScavenger *RS) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                              MachineBranchPredicate &MBP,
                              bool AllowModify) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       Register, Register, Register, int &, int &,
                       int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, Register DstReg,
                    ArrayRef<MachineOperand> Cond, Register TrueReg,
                    Register FalseReg) const override;

  void insertNoop(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator MI) const override;

  MCInst getNop() const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                      Register &SrcReg2, int64_t &CmpMask,
                      int64_t &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                            Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
                            const MachineRegisterInfo *MRI) const override;
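
  // Illustrative sketch (hypothetical assembly, for exposition only): the
  // classic case this enables is folding a compare-against-zero into the
  // flag-setting form of the defining instruction, e.g.
  //
  //   sub  w8, w0, w1           ->   subs w8, w0, w1
  //   cmp  w8, #0                    b.eq .LBB0_2
  //   b.eq .LBB0_2
  //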
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated. If Invert is true, then the inverse of Inst operation must
  /// be checked.
  bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                   bool Invert) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  std::optional<outliner::OutlinedFunction> getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  void mergeOutliningCandidateAttributes(
      Function &F, std::vector<outliner::Candidate> &Candidates) const override;
  outliner::InstrType
  getOutliningTypeImpl(MachineBasicBlock::iterator &MIT,
                       unsigned Flags) const override;
  SmallVector<
      std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
  getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const override;

  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;

  void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator Iter, DebugLoc &DL,
                          bool AllowSideEffects = true) const override;

  /// Returns the vector element size (B, H, S or D) of an SVE opcode.
  uint64_t getElementSizeForOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE instruction that sets the
  /// condition codes as if its results had been fed to a PTEST instruction
  /// along with the same general predicate.
  bool isPTestLikeOpcode(unsigned Opc) const;
  /// Returns true if the opcode is for an SVE WHILE## instruction.
  bool isWhileOpcode(unsigned Opc) const;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one cycle less.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is a SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                           Register Reg) const override;

  bool isFunctionSafeToSplit(const MachineFunction &MF) const override;

  bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;

  std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const override;

  unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;

  bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                MachineRegisterInfo &MRI) const override;

  static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
                                                  int64_t &NumBytes,
                                                  int64_t &NumPredicateVectors,
                                                  int64_t &NumDataVectors);
  static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
                                                  int64_t &ByteSized,
                                                  int64_t &VGSized);

  bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override;

  // Return true if address of the form BaseReg + Scale * ScaledReg + Offset can
  // be used for a load/store of NumBytes. BaseReg is always present and
  // implicit.
  bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
                             unsigned Scale) const;

  // Decrement the SP, issuing probes along the way. `TargetReg` is the new top
  // of the stack. `FrameSetup` is passed as true if the allocation is part
  // of constructing the activation frame of a function.
  MachineBasicBlock::iterator probedStackAlloc(MachineBasicBlock::iterator MBBI,
                                               Register TargetReg,
                                               bool FrameSetup) const;

#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// value from one register to another register return destination and source
  /// registers as machine operands.
  std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  unsigned getInstBundleLength(const MachineInstr &MI) const;

  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo &MRI) const;
  bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
                            int CmpValue, const MachineRegisterInfo &MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  Register findRegisterToSaveLRTo(outliner::Candidate &C) const;

  /// Remove a ptest of a predicate-generating operation that already sets, or
  /// can be made to set, the condition codes in an identical manner
  bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
                          unsigned PredReg,
                          const MachineRegisterInfo *MRI) const;
};

struct UsedNZCV {
  bool N = false;
  bool Z = false;
  bool C = false;
  bool V = false;

  UsedNZCV() = default;

  UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
    this->N |= UsedFlags.N;
    this->Z |= UsedFlags.Z;
    this->C |= UsedFlags.C;
    this->V |= UsedFlags.V;
    return *this;
  }
};

/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
/// \returns std::nullopt otherwise.
///
/// Collect instructions using those flags in \p CCUseInstrs if provided.
std::optional<UsedNZCV>
examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                 const TargetRegisterInfo &TRI,
                 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
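
// Illustrative sketch (hypothetical usage; DefMI, CmpInstr and TRI stand for
// caller-provided values): a flags-rewriting optimization would typically only
// proceed when the flags that remain live are ones the rewrite preserves:
//
//   if (std::optional<UsedNZCV> Used = examineCFlagsUse(DefMI, CmpInstr, TRI))
//     if (!Used->C && !Used->V) {
//       // Only N/Z are consumed afterwards; safe to rewrite the compare.
//     }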

/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                     const MachineInstr &UseMI,
                                     const TargetRegisterInfo *TRI);

MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
                              unsigned Reg, const StackOffset &Offset,
                              bool LastAdjustmentWasScalable = true);
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
                                 const StackOffset &OffsetFromDefCFA);

470/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
471/// plus Offset. This is intended to be used from within the prolog/epilog
472/// insertion (PEI) pass, where a virtual scratch register may be allocated
473/// if necessary, to be replaced by the scavenger at the end of PEI.
474void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
475 const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
476 StackOffset Offset, const TargetInstrInfo *TII,
478 bool SetNZCV = false, bool NeedsWinCFI = false,
479 bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
480 StackOffset InitialOffset = {},
481 unsigned FrameReg = AArch64::SP);
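
// Illustrative sketch (hypothetical usage): a prologue-style caller that wants
// "sp := sp - 16" materialized at MBBI, tagged as frame setup, might write:
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   StackOffset::getFixed(-16), TII, MachineInstr::FrameSetup);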

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

497/// Check if the @p Offset is a valid frame offset for @p MI.
498/// The returned value reports the validity of the frame offset for @p MI.
499/// It uses the values defined by AArch64FrameOffsetStatus for that.
500/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
501/// use an offset.eq
502/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
503/// rewritten in @p MI.
504/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
505/// amount that is off the limit of the legal offset.
506/// If set, @p OutUseUnscaledOp will contain the whether @p MI should be
507/// turned into an unscaled operator, which opcode is in @p OutUnscaledOp.
508/// If set, @p EmittableOffset contains the amount that can be set in @p MI
509/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
510/// is a legal offset.
511int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
512 bool *OutUseUnscaledOp = nullptr,
513 unsigned *OutUnscaledOp = nullptr,
514 int64_t *EmittableOffset = nullptr);
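
// Illustrative sketch (hypothetical usage; OffsetBytes is a placeholder):
// callers typically test the returned status bits rather than compare for
// equality:
//
//   StackOffset Off = StackOffset::getFixed(OffsetBytes);
//   int Status = isAArch64FrameOffsetLegal(MI, Off);
//   if (Status & AArch64FrameOffsetCanUpdate) {
//     // Fold the emittable part into MI; 'Off' now holds the remainder.
//   }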

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::BR:
  case AArch64::BRAA:
  case AArch64::BRAB:
  case AArch64::BRAAZ:
  case AArch64::BRABZ:
    return true;
  }
  return false;
}

static inline bool isPTrueOpcode(unsigned Opc) {
  switch (Opc) {
  case AArch64::PTRUE_B:
  case AArch64::PTRUE_H:
  case AArch64::PTRUE_S:
  case AArch64::PTRUE_D:
    return true;
  default:
    return false;
  }
}

/// Return opcode to be used for indirect calls.
unsigned getBLRCallOpcode(const MachineFunction &MF);

/// Return XPAC opcode to be used for a ptrauth strip using the given key.
static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: case IB: return AArch64::XPACI;
  case DA: case DB: return AArch64::XPACD;
  }
  llvm_unreachable("Unhandled AArch64PACKey::ID enum");
}

/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
  case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
  case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
  case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
  }
}

/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
  using namespace AArch64PACKey;
  switch (K) {
  case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
  case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
  case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
  case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
  }
}

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)     (X)          // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3)   // 4-bits
#define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7)   // 2-bits
#define TSFLAG_INSTR_FLAGS(X)           ((X) << 9)   // 2-bits
#define TSFLAG_SME_MATRIX_TYPE(X)       ((X) << 11)  // 3-bits
// }
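
// Illustrative note (derived from the shift amounts above): the macros pack
// per-instruction properties into TSFlags as
//   bits [2:0]   element size type
//   bits [6:3]   destructive instruction type
//   bits [8:7]   false lane type
//   bits [10:9]  instruction flags (e.g. WHILE / PTEST-like)
//   bits [13:11] SME matrix type
// so, for example, TSFLAG_FALSE_LANE_TYPE(0x3) is the mask 0x3 << 7 == 0x180.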

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0xf),
  NotDestructive = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  DestructiveOther = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  DestructiveUnary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x2),
  DestructiveBinaryImm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x3),
  DestructiveBinaryShImmUnpred = TSFLAG_DESTRUCTIVE_INST_TYPE(0x4),
  DestructiveBinary = TSFLAG_DESTRUCTIVE_INST_TYPE(0x5),
  DestructiveBinaryComm = TSFLAG_DESTRUCTIVE_INST_TYPE(0x6),
  DestructiveBinaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x7),
  DestructiveTernaryCommWithRev = TSFLAG_DESTRUCTIVE_INST_TYPE(0x8),
  DestructiveUnaryPassthru = TSFLAG_DESTRUCTIVE_INST_TYPE(0x9),
};

enum FalseLaneType {
  FalseLanesMask = TSFLAG_FALSE_LANE_TYPE(0x3),
  FalseLanesZero = TSFLAG_FALSE_LANE_TYPE(0x1),
  FalseLanesUndef = TSFLAG_FALSE_LANE_TYPE(0x2),
};

// NOTE: This is a bit field.
static const uint64_t InstrFlagIsWhile = TSFLAG_INSTR_FLAGS(0x1);
static const uint64_t InstrFlagIsPTestLike = TSFLAG_INSTR_FLAGS(0x2);

enum SMEMatrixType {
  SMEMatrixTypeMask = TSFLAG_SME_MATRIX_TYPE(0x7),
  SMEMatrixNone = TSFLAG_SME_MATRIX_TYPE(0x0),
  SMEMatrixTileB = TSFLAG_SME_MATRIX_TYPE(0x1),
  SMEMatrixTileH = TSFLAG_SME_MATRIX_TYPE(0x2),
  SMEMatrixTileS = TSFLAG_SME_MATRIX_TYPE(0x3),
  SMEMatrixTileD = TSFLAG_SME_MATRIX_TYPE(0x4),
  SMEMatrixTileQ = TSFLAG_SME_MATRIX_TYPE(0x5),
  SMEMatrixArray = TSFLAG_SME_MATRIX_TYPE(0x6),
};

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
#undef TSFLAG_FALSE_LANE_TYPE
#undef TSFLAG_INSTR_FLAGS
#undef TSFLAG_SME_MATRIX_TYPE

int getSVEPseudoMap(uint16_t Opcode);
int getSVERevInstr(uint16_t Opcode);
int getSVENonRevInstr(uint16_t Opcode);

int getSMEPseudoMap(uint16_t Opcode);

} // end namespace AArch64

} // end namespace llvm

#endif