LLVM 23.0.0git
AArch64InstrInfo.h
Go to the documentation of this file.
1//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
14#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
15
16#include "AArch64.h"
17#include "AArch64RegisterInfo.h"
20#include <optional>
21
22#define GET_INSTRINFO_HEADER
23#include "AArch64GenInstrInfo.inc"
24
25namespace llvm {
26
27class AArch64Subtarget;
28
33
34#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
35
36// AArch64 MachineCombiner patterns
38 // These are patterns used to reduce the length of dependence chain.
41
42 // These are multiply-add patterns matched by the AArch64 machine combiner.
55 // NEON integer vectors
68
81
90
99
100 // Floating Point
162
173
175
179};
181 const AArch64RegisterInfo RI;
182 const AArch64Subtarget &Subtarget;
183
184public:
185 explicit AArch64InstrInfo(const AArch64Subtarget &STI);
186
187 /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
188 /// such, whenever a client has an instance of instruction info, it should
189 /// always be able to get register info as well (through this method).
190 const AArch64RegisterInfo &getRegisterInfo() const { return RI; }
191
192 unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
193
194 bool isAsCheapAsAMove(const MachineInstr &MI) const override;
195
196 bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
197 Register &DstReg, unsigned &SubIdx) const override;
198
199 bool
201 const MachineInstr &MIb) const override;
202
204 int &FrameIndex) const override;
206 int &FrameIndex) const override;
207
208 /// Check for post-frame ptr elimination stack locations as well. This uses a
209 /// heuristic so it isn't reliable for correctness.
211 int &FrameIndex) const override;
212 /// Check for post-frame ptr elimination stack locations as well. This uses a
213 /// heuristic so it isn't reliable for correctness.
215 int &FrameIndex) const override;
216
217 /// Does this instruction set its full destination register to zero?
218 static bool isGPRZero(const MachineInstr &MI);
219
220 /// Does this instruction rename a GPR without modifying bits?
221 static bool isGPRCopy(const MachineInstr &MI);
222
223 /// Does this instruction rename an FPR without modifying bits?
224 static bool isFPRCopy(const MachineInstr &MI);
225
226 /// Return true if pairing the given load or store is hinted to be
227 /// unprofitable.
228 static bool isLdStPairSuppressed(const MachineInstr &MI);
229
230 /// Return true if the given load or store is a strided memory access.
231 static bool isStridedAccess(const MachineInstr &MI);
232
233 /// Return true if it has an unscaled load/store offset.
234 static bool hasUnscaledLdStOffset(unsigned Opc);
236 return hasUnscaledLdStOffset(MI.getOpcode());
237 }
238
239 /// Returns the unscaled load/store for the scaled load/store opcode,
240 /// if there is a corresponding unscaled variant available.
241 static std::optional<unsigned> getUnscaledLdSt(unsigned Opc);
242
243 /// Scaling factor for (scaled or unscaled) load or store.
244 static int getMemScale(unsigned Opc);
245 static int getMemScale(const MachineInstr &MI) {
246 return getMemScale(MI.getOpcode());
247 }
248
249 /// Returns whether the instruction is a pre-indexed load.
250 static bool isPreLd(const MachineInstr &MI);
251
252 /// Returns whether the instruction is a pre-indexed store.
253 static bool isPreSt(const MachineInstr &MI);
254
255 /// Returns whether the instruction is a pre-indexed load/store.
256 static bool isPreLdSt(const MachineInstr &MI);
257
258 /// Returns whether the instruction is a zero-extending load.
259 static bool isZExtLoad(const MachineInstr &MI);
260
261 /// Returns whether the instruction is a sign-extending load.
262 static bool isSExtLoad(const MachineInstr &MI);
263
264 /// Returns whether the instruction is a paired load/store.
265 static bool isPairedLdSt(const MachineInstr &MI);
266
267 /// Returns the base register operator of a load/store.
268 static const MachineOperand &getLdStBaseOp(const MachineInstr &MI);
269
270 /// Returns the immediate offset operator of a load/store.
271 static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI);
272
273 /// Returns whether the physical register is FP or NEON.
274 static bool isFpOrNEON(Register Reg);
275
276 /// Returns the shift amount operator of a load/store.
277 static const MachineOperand &getLdStAmountOp(const MachineInstr &MI);
278
279 /// Returns whether the instruction is FP or NEON.
280 static bool isFpOrNEON(const MachineInstr &MI);
281
282 /// Returns whether the instruction is in H form (16 bit operands)
283 static bool isHForm(const MachineInstr &MI);
284
285 /// Returns whether the instruction is in Q form (128 bit operands)
286 static bool isQForm(const MachineInstr &MI);
287
288 /// Returns whether the instruction can be compatible with non-zero BTYPE.
289 static bool hasBTISemantics(const MachineInstr &MI);
290
291 /// Returns the index for the immediate for a given instruction.
292 static unsigned getLoadStoreImmIdx(unsigned Opc);
293
294 /// Return true if pairing the given load or store may be paired with another.
295 static bool isPairableLdStInst(const MachineInstr &MI);
296
297 /// Returns true if MI is one of the TCRETURN* instructions.
298 static bool isTailCallReturnInst(const MachineInstr &MI);
299
300 /// Return the opcode that set flags when possible. The caller is
301 /// responsible for ensuring the opc has a flag setting equivalent.
302 static unsigned convertToFlagSettingOpc(unsigned Opc);
303
304 /// Return true if this is a load/store that can be potentially paired/merged.
305 bool isCandidateToMergeOrPair(const MachineInstr &MI) const;
306
307 /// Hint that pairing the given load or store is unprofitable.
308 static void suppressLdStPair(MachineInstr &MI);
309
310 std::optional<ExtAddrMode>
312 const TargetRegisterInfo *TRI) const override;
313
315 const MachineInstr &AddrI,
316 ExtAddrMode &AM) const override;
317
319 const ExtAddrMode &AM) const override;
320
323 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
324 const TargetRegisterInfo *TRI) const override;
325
326 /// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
327 /// This is true for some SVE instructions like ldr/str that have a
328 /// 'reg + imm' addressing mode where the immediate is an index to the
329 /// scalable vector located at 'reg + imm * vscale x #bytes'.
331 const MachineOperand *&BaseOp,
332 int64_t &Offset, bool &OffsetIsScalable,
333 TypeSize &Width,
334 const TargetRegisterInfo *TRI) const;
335
336 /// Return the immediate offset of the base register in a load/store \p LdSt.
338
339 /// Returns true if opcode \p Opc is a memory operation. If it is, set
340 /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
341 ///
342 /// For unscaled instructions, \p Scale is set to 1. All values are in bytes.
343 /// MinOffset/MaxOffset are the un-scaled limits of the immediate in the
344 /// instruction, the actual offset limit is [MinOffset*Scale,
345 /// MaxOffset*Scale].
346 static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width,
347 int64_t &MinOffset, int64_t &MaxOffset);
348
350 int64_t Offset1, bool OffsetIsScalable1,
352 int64_t Offset2, bool OffsetIsScalable2,
353 unsigned ClusterSize,
354 unsigned NumBytes) const override;
355
357 const DebugLoc &DL, MCRegister DestReg,
358 MCRegister SrcReg, bool KillSrc, unsigned Opcode,
359 llvm::ArrayRef<unsigned> Indices) const;
361 const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
362 bool KillSrc, unsigned Opcode, unsigned ZeroReg,
363 llvm::ArrayRef<unsigned> Indices) const;
365 const DebugLoc &DL, Register DestReg, Register SrcReg,
366 bool KillSrc, bool RenamableDest = false,
367 bool RenamableSrc = false) const override;
368
371 bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
372 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
373
376 Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
377 Register VReg, unsigned SubReg = 0,
378 MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
379
380 // This tells target independent code that it is okay to pass instructions
381 // with subreg operands to foldMemoryOperandImpl.
382 bool isSubregFoldable() const override { return true; }
383
386 ArrayRef<unsigned> Ops, int FrameIndex,
387 MachineInstr *&CopyMI,
388 LiveIntervals *LIS = nullptr,
389 VirtRegMap *VRM = nullptr) const override;
390
391 /// \returns true if a branch from an instruction with opcode \p BranchOpc
392 /// bytes is capable of jumping to a position \p BrOffset bytes away.
393 bool isBranchOffsetInRange(unsigned BranchOpc,
394 int64_t BrOffset) const override;
395
396 MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
397
399 MachineBasicBlock &NewDestBB,
400 MachineBasicBlock &RestoreBB, const DebugLoc &DL,
401 int64_t BrOffset, RegScavenger *RS) const override;
402
404 MachineBasicBlock *&FBB,
406 bool AllowModify = false) const override;
408 MachineBranchPredicate &MBP,
409 bool AllowModify) const override;
411 int *BytesRemoved = nullptr) const override;
414 const DebugLoc &DL,
415 int *BytesAdded = nullptr) const override;
416
417 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
418 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
419
420 bool
423 Register, Register, Register, int &, int &,
424 int &) const override;
426 const DebugLoc &DL, Register DstReg,
428 Register FalseReg) const override;
429
431 MachineBasicBlock::iterator MI) const override;
432
433 MCInst getNop() const override;
434
436 const MachineBasicBlock *MBB,
437 const MachineFunction &MF) const override;
438
439 /// analyzeCompare - For a comparison instruction, return the source registers
440 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
441 /// Return true if the comparison instruction can be analyzed.
442 bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
443 Register &SrcReg2, int64_t &CmpMask,
444 int64_t &CmpValue) const override;
445 /// optimizeCompareInstr - Convert the instruction supplying the argument to
446 /// the comparison into one that sets the zero bit in the flags register.
447 bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
448 Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
449 const MachineRegisterInfo *MRI) const override;
450 bool optimizeCondBranch(MachineInstr &MI) const override;
451
452 CombinerObjective getCombinerObjective(unsigned Pattern) const override;
453 /// Return true when a code sequence can improve throughput. It
454 /// should be called only for instructions in loops.
455 /// \param Pattern - combiner pattern
456 bool isThroughputPattern(unsigned Pattern) const override;
457 /// Return true when there is potentially a faster code sequence
458 /// for an instruction chain ending in ``Root``. All potential patterns are
459 /// listed in the ``Patterns`` array.
460 bool getMachineCombinerPatterns(MachineInstr &Root,
462 bool DoRegPressureReduce) const override;
463 /// Return true when Inst is associative and commutative so that it can be
464 /// reassociated. If Invert is true, then the inverse of Inst operation must
465 /// be checked.
466 bool isAssociativeAndCommutative(const MachineInstr &Inst,
467 bool Invert) const override;
468
469 /// Returns true if \P Opcode is an instruction which performs accumulation
470 /// into a destination register.
471 bool isAccumulationOpcode(unsigned Opcode) const override;
472
473 /// Returns an opcode which defines the accumulator used by \P Opcode.
474 unsigned getAccumulationStartOpcode(unsigned Opcode) const override;
475
476 unsigned
477 getReduceOpcodeForAccumulator(unsigned int AccumulatorOpCode) const override;
478
479 /// When getMachineCombinerPatterns() finds patterns, this function
480 /// generates the instructions that could replace the original code
481 /// sequence
482 void genAlternativeCodeSequence(
483 MachineInstr &Root, unsigned Pattern,
486 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const override;
487 /// AArch64 supports MachineCombiner.
488 bool useMachineCombiner() const override;
489
490 bool expandPostRAPseudo(MachineInstr &MI) const override;
491
492 std::pair<unsigned, unsigned>
493 decomposeMachineOperandsTargetFlags(unsigned TF) const override;
495 getSerializableDirectMachineOperandTargetFlags() const override;
497 getSerializableBitmaskMachineOperandTargetFlags() const override;
499 getSerializableMachineMemOperandTargetFlags() const override;
500
501 bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
502 bool OutlineFromLinkOnceODRs) const override;
503 std::optional<std::unique_ptr<outliner::OutlinedFunction>>
504 getOutliningCandidateInfo(
505 const MachineModuleInfo &MMI,
506 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
507 unsigned MinRepeats) const override;
508 void mergeOutliningCandidateAttributes(
509 Function &F, std::vector<outliner::Candidate> &Candidates) const override;
510 outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI,
512 unsigned Flags) const override;
514 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
515 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const override;
516 void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
517 const outliner::OutlinedFunction &OF) const override;
519 insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
521 outliner::Candidate &C) const override;
522 bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
523
524 void buildClearRegister(Register Reg, MachineBasicBlock &MBB,
526 bool AllowSideEffects = true) const override;
527
528 /// Returns the vector element size (B, H, S or D) of an SVE opcode.
529 uint64_t getElementSizeForOpcode(unsigned Opc) const;
530 /// Returns true if the opcode is for an SVE instruction that sets the
531 /// condition codes as if its results had been fed to a PTEST instruction
532 /// along with the same general predicate.
533 bool isPTestLikeOpcode(unsigned Opc) const;
534 /// Returns true if the opcode is for an SVE WHILE## instruction.
535 bool isWhileOpcode(unsigned Opc) const;
536 /// Returns true if the instruction has a shift by immediate that can be
537 /// executed in one cycle less.
538 static bool isFalkorShiftExtFast(const MachineInstr &MI);
539 /// Return true if the instruction is a SEH instruction used for unwinding
540 /// on Windows.
541 static bool isSEHInstruction(const MachineInstr &MI);
542
543 std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
544 Register Reg) const override;
545
546 bool isFunctionSafeToSplit(const MachineFunction &MF) const override;
547
548 bool isMBBSafeToSplitToCold(const MachineBasicBlock &MBB) const override;
549
550 std::optional<ParamLoadedValue>
551 describeLoadedValue(const MachineInstr &MI, Register Reg) const override;
552
553 unsigned int getTailDuplicateSize(CodeGenOptLevel OptLevel) const override;
554
555 bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
556 MachineRegisterInfo &MRI) const override;
557
558 static void decomposeStackOffsetForFrameOffsets(const StackOffset &Offset,
559 int64_t &NumBytes,
560 int64_t &NumPredicateVectors,
561 int64_t &NumDataVectors);
562 static void decomposeStackOffsetForDwarfOffsets(const StackOffset &Offset,
563 int64_t &ByteSized,
564 int64_t &VGSized);
565
566 // Return true if address of the form BaseReg + Scale * ScaledReg + Offset can
567 // be used for a load/store of NumBytes. BaseReg is always present and
568 // implicit.
569 bool isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
570 unsigned Scale) const;
571
572 // Decrement the SP, issuing probes along the way. `TargetReg` is the new top
573 // of the stack. `FrameSetup` is passed as true, if the allocation is a part
574 // of constructing the activation frame of a function.
576 Register TargetReg,
577 bool FrameSetup) const;
578
579 static int
580 findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr);
581
582 /// Insert a `PAUTH_EPILOGUE` pseudo before the first terminator in \p MBB to
583 /// authenticate the return address. Adds an implicit def of X16 when the
584 /// branch protection uses PAuthLR but the subtarget lacks PAuthLR
585 /// instructions.
587
588#define GET_INSTRINFO_HELPER_DECLS
589#include "AArch64GenInstrInfo.inc"
590
591protected:
592 /// If the specific machine instruction is an instruction that moves/copies
593 /// value from one register to another register return destination and source
594 /// registers as machine operands.
595 std::optional<DestSourcePair>
596 isCopyInstrImpl(const MachineInstr &MI) const override;
597 std::optional<DestSourcePair>
598 isCopyLikeInstrImpl(const MachineInstr &MI) const override;
599
600private:
601 /// Sets the offsets on outlined instructions in \p MBB which use SP
602 /// so that they will be valid post-outlining.
603 ///
604 /// \param MBB A \p MachineBasicBlock in an outlined function.
605 void fixupPostOutline(MachineBasicBlock &MBB) const;
606
607 void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
608 MachineBasicBlock *TBB,
610 bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
611 const MachineRegisterInfo &MRI) const;
612 bool removeCmpToZeroOrOne(MachineInstr &CmpInstr, unsigned SrcReg,
613 int CmpValue, const MachineRegisterInfo &MRI) const;
614
615 /// Returns an unused general-purpose register which can be used for
616 /// constructing an outlined call if one exists. Returns 0 otherwise.
617 Register findRegisterToSaveLRTo(outliner::Candidate &C) const;
618
619 /// Remove a ptest of a predicate-generating operation that already sets, or
620 /// can be made to set, the condition codes in an identical manner
621 bool optimizePTestInstr(MachineInstr *PTest, unsigned MaskReg,
622 unsigned PredReg,
623 const MachineRegisterInfo *MRI) const;
624 std::optional<unsigned>
625 canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
626 MachineInstr *Pred, const MachineRegisterInfo *MRI) const;
627
628 /// verifyInstruction - Perform target specific instruction verification.
629 bool verifyInstruction(const MachineInstr &MI,
630 StringRef &ErrInfo) const override;
631};
632
/// Summarizes which of the four NZCV condition flags are (potentially) read
/// by instructions following a flag-setting operation.
struct UsedNZCV {
  bool N = false; // Negative flag is used.
  bool Z = false; // Zero flag is used.
  bool C = false; // Carry flag is used.
  bool V = false; // Overflow flag is used.

  UsedNZCV() = default;

  /// Merge another usage summary into this one: a flag counts as used if
  /// either side uses it. Returns *this to allow chaining.
  UsedNZCV &operator|=(const UsedNZCV &Other) {
    N |= Other.N;
    Z |= Other.Z;
    C |= Other.C;
    V |= Other.V;
    return *this;
  }
};
649
650/// \returns Conditions flags used after \p CmpInstr in its MachineBB if NZCV
651/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
652/// \returns std::nullopt otherwise.
653///
654/// Collect instructions using that flags in \p CCUseInstrs if provided.
655std::optional<UsedNZCV>
656examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
657 const TargetRegisterInfo &TRI,
658 SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
659
660/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
661/// which either reads or clobbers NZCV.
662bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
663 const MachineInstr &UseMI,
664 const TargetRegisterInfo *TRI);
665
666MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg,
667 unsigned Reg, const StackOffset &Offset,
668 bool LastAdjustmentWasScalable = true);
669MCCFIInstruction
670createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg,
671 const StackOffset &OffsetFromDefCFA,
672 std::optional<int64_t> IncomingVGOffsetFromDefCFA);
673
674/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
675/// plus Offset. This is intended to be used from within the prolog/epilog
676/// insertion (PEI) pass, where a virtual scratch register may be allocated
677/// if necessary, to be replaced by the scavenger at the end of PEI.
678void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
679 const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
680 StackOffset Offset, const TargetInstrInfo *TII,
682 bool SetNZCV = false, bool NeedsWinCFI = false,
683 bool *HasWinCFI = nullptr, bool EmitCFAOffset = false,
684 StackOffset InitialOffset = {},
685 unsigned FrameReg = AArch64::SP);
686
687/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
688/// FP. Return false if the offset could not be handled directly in MI, and
689/// return the left-over portion by reference.
690bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
691 unsigned FrameReg, StackOffset &Offset,
692 const AArch64InstrInfo *TII);
693
694/// Use to report the frame offset status in isAArch64FrameOffsetLegal.
696 AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
697 AArch64FrameOffsetIsLegal = 0x1, ///< Offset is legal.
698 AArch64FrameOffsetCanUpdate = 0x2 ///< Offset can apply, at least partly.
699};
700
701/// Check if the @p Offset is a valid frame offset for @p MI.
702/// The returned value reports the validity of the frame offset for @p MI.
703/// It uses the values defined by AArch64FrameOffsetStatus for that.
704/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
705/// use an offset.
706/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
707/// rewritten in @p MI.
708/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
709/// amount that is off the limit of the legal offset.
710/// If set, @p OutUseUnscaledOp will contain the whether @p MI should be
711/// turned into an unscaled operator, which opcode is in @p OutUnscaledOp.
712/// If set, @p EmittableOffset contains the amount that can be set in @p MI
713/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
714/// is a legal offset.
715int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
716 bool *OutUseUnscaledOp = nullptr,
717 unsigned *OutUnscaledOp = nullptr,
718 int64_t *EmittableOffset = nullptr);
719
720bool optimizeTerminators(MachineBasicBlock *MBB, const TargetInstrInfo &TII);
721
722static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }
723
724static inline bool isCondBranchOpcode(int Opc) {
725 switch (Opc) {
726 case AArch64::Bcc:
727 case AArch64::CBZW:
728 case AArch64::CBZX:
729 case AArch64::CBNZW:
730 case AArch64::CBNZX:
731 case AArch64::TBZW:
732 case AArch64::TBZX:
733 case AArch64::TBNZW:
734 case AArch64::TBNZX:
735 case AArch64::CBWPri:
736 case AArch64::CBXPri:
737 case AArch64::CBBAssertExt:
738 case AArch64::CBHAssertExt:
739 case AArch64::CBWPrr:
740 case AArch64::CBXPrr:
741 return true;
742 default:
743 return false;
744 }
745}
746
747static inline bool isIndirectBranchOpcode(int Opc) {
748 switch (Opc) {
749 case AArch64::BR:
750 case AArch64::BRAA:
751 case AArch64::BRAB:
752 case AArch64::BRAAZ:
753 case AArch64::BRABZ:
754 return true;
755 }
756 return false;
757}
758
759static inline bool isIndirectCallOpcode(unsigned Opc) {
760 switch (Opc) {
761 case AArch64::BLR:
762 case AArch64::BLRAA:
763 case AArch64::BLRAB:
764 case AArch64::BLRAAZ:
765 case AArch64::BLRABZ:
766 return true;
767 default:
768 return false;
769 }
770}
771
772static inline bool isPTrueOpcode(unsigned Opc) {
773 switch (Opc) {
774 case AArch64::PTRUE_B:
775 case AArch64::PTRUE_H:
776 case AArch64::PTRUE_S:
777 case AArch64::PTRUE_D:
778 return true;
779 default:
780 return false;
781 }
782}
783
784/// Return opcode to be used for indirect calls.
785unsigned getBLRCallOpcode(const MachineFunction &MF);
786
787/// Return XPAC opcode to be used for a ptrauth strip using the given key.
788static inline unsigned getXPACOpcodeForKey(AArch64PACKey::ID K) {
789 using namespace AArch64PACKey;
790 switch (K) {
791 case IA: case IB: return AArch64::XPACI;
792 case DA: case DB: return AArch64::XPACD;
793 }
794 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
795}
796
797/// Return AUT opcode to be used for a ptrauth auth using the given key, or its
798/// AUT*Z variant that doesn't take a discriminator operand, using zero instead.
799static inline unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
800 using namespace AArch64PACKey;
801 switch (K) {
802 case IA: return Zero ? AArch64::AUTIZA : AArch64::AUTIA;
803 case IB: return Zero ? AArch64::AUTIZB : AArch64::AUTIB;
804 case DA: return Zero ? AArch64::AUTDZA : AArch64::AUTDA;
805 case DB: return Zero ? AArch64::AUTDZB : AArch64::AUTDB;
806 }
807 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
808}
809
810/// Return PAC opcode to be used for a ptrauth sign using the given key, or its
811/// PAC*Z variant that doesn't take a discriminator operand, using zero instead.
812static inline unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero) {
813 using namespace AArch64PACKey;
814 switch (K) {
815 case IA: return Zero ? AArch64::PACIZA : AArch64::PACIA;
816 case IB: return Zero ? AArch64::PACIZB : AArch64::PACIB;
817 case DA: return Zero ? AArch64::PACDZA : AArch64::PACDA;
818 case DB: return Zero ? AArch64::PACDZB : AArch64::PACDB;
819 }
820 llvm_unreachable("Unhandled AArch64PACKey::ID enum");
821}
822
823/// Return B(L)RA opcode to be used for an authenticated branch or call using
824/// the given key, or its B(L)RA*Z variant that doesn't take a discriminator
825/// operand, using zero instead.
826static inline unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K,
827 bool Zero) {
828 using namespace AArch64PACKey;
829 static const unsigned BranchOpcode[2][2] = {
830 {AArch64::BRAA, AArch64::BRAAZ},
831 {AArch64::BRAB, AArch64::BRABZ},
832 };
833 static const unsigned CallOpcode[2][2] = {
834 {AArch64::BLRAA, AArch64::BLRAAZ},
835 {AArch64::BLRAB, AArch64::BLRABZ},
836 };
837
838 assert((K == IA || K == IB) && "B(L)RA* instructions require IA or IB key");
839 if (IsCall)
840 return CallOpcode[K == IB][Zero];
841 return BranchOpcode[K == IB][Zero];
842}
843
844// struct TSFlags {
845#define TSFLAG_ELEMENT_SIZE_TYPE(X) (X) // 3-bits
846#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 4-bits
847#define TSFLAG_FALSE_LANE_TYPE(X) ((X) << 7) // 2-bits
848#define TSFLAG_INSTR_FLAGS(X) ((X) << 9) // 2-bits
849#define TSFLAG_SME_MATRIX_TYPE(X) ((X) << 11) // 3-bits
850// }
851
852namespace AArch64 {
853
854// clang-format off
863
880
886
887// clang-format on
888
889// NOTE: This is a bit field.
892
903
904#undef TSFLAG_ELEMENT_SIZE_TYPE
905#undef TSFLAG_DESTRUCTIVE_INST_TYPE
906#undef TSFLAG_FALSE_LANE_TYPE
907#undef TSFLAG_INSTR_FLAGS
908#undef TSFLAG_SME_MATRIX_TYPE
909
910int32_t getSVEPseudoMap(uint32_t Opcode);
911int32_t getSVERevInstr(uint32_t Opcode);
913
914int32_t getSMEPseudoMap(uint32_t Opcode);
915}
916
917} // end namespace llvm
918
919#endif
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X)
#define TSFLAG_SME_MATRIX_TYPE(X)
#define TSFLAG_FALSE_LANE_TYPE(X)
#define TSFLAG_INSTR_FLAGS(X)
#define TSFLAG_ELEMENT_SIZE_TYPE(X)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool isHForm(const MachineInstr &MI)
Returns whether the instruction is in H form (16 bit operands)
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
static bool hasBTISemantics(const MachineInstr &MI)
Returns whether the instruction can be compatible with non-zero BTYPE.
static bool isQForm(const MachineInstr &MI)
Returns whether the instruction is in Q form (128 bit operands)
static bool getMemOpInfo(unsigned Opcode, TypeSize &Scale, TypeSize &Width, int64_t &MinOffset, int64_t &MaxOffset)
Returns true if opcode Opc is a memory operation.
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isFPRCopy(const MachineInstr &MI)
Does this instruction rename an FPR without modifying bits?
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
static int getMemScale(const MachineInstr &MI)
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
bool isSubregFoldable() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
static bool isZExtLoad(const MachineInstr &MI)
Returns whether the instruction is a zero-extending load.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
static bool isGPRCopy(const MachineInstr &MI)
Does this instruction rename a GPR without modifying bits?
static unsigned convertToFlagSettingOpc(unsigned Opc)
Return the opcode that set flags when possible.
void createPauthEpilogueInstr(MachineBasicBlock &MBB, DebugLoc DL) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
Check for post-frame ptr elimination stack locations as well.
static const MachineOperand & getLdStOffsetOp(const MachineInstr &MI)
Returns the immediate offset operator of a load/store.
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
static std::optional< unsigned > getUnscaledLdSt(unsigned Opc)
Returns the unscaled load/store for the scaled load/store opcode, if there is a corresponding unscale...
static bool hasUnscaledLdStOffset(unsigned Opc)
Return true if it has an unscaled load/store offset.
static const MachineOperand & getLdStAmountOp(const MachineInstr &MI)
Returns the shift amount operator of a load/store.
static bool hasUnscaledLdStOffset(MachineInstr &MI)
static bool isPreLdSt(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load/store.
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool analyzeBranchPredicate(MachineBasicBlock &MBB, MachineBranchPredicate &MBP, bool AllowModify) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isPairableLdStInst(const MachineInstr &MI)
Return true if the given load or store may be paired with another.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isSExtLoad(const MachineInstr &MI)
Returns whether the instruction is a sign-extending load.
const AArch64RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
static bool isPreSt(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed store.
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
AArch64InstrInfo(const AArch64Subtarget &STI)
static bool isPairedLdSt(const MachineInstr &MI)
Returns whether the instruction is a paired load/store.
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset, bool &OffsetIsScalable, TypeSize &Width, const TargetRegisterInfo *TRI) const
If OffsetIsScalable is set to 'true', the offset is scaled by vscale.
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
static bool isStridedAccess(const MachineInstr &MI)
Return true if the given load or store is a strided memory access.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Detect opportunities for ldp/stp formation.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
bool isThroughputPattern(unsigned Pattern) const override
Return true when a code sequence can improve throughput.
MachineOperand & getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const
Return the immediate offset of the base register in a load/store LdSt.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
static bool isLdStPairSuppressed(const MachineInstr &MI)
Return true if pairing the given load or store is hinted to be unprofitable.
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
Check for post-frame ptr elimination stack locations as well.
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that sets the zero bit in the flags register.
static unsigned getLoadStoreImmIdx(unsigned Opc)
Returns the index for the immediate for a given instruction.
static bool isGPRZero(const MachineInstr &MI)
Does this instruction set its full destination register to zero?
void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, unsigned ZeroReg, llvm::ArrayRef< unsigned > Indices) const
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2, and the value it compares against in CmpValue.
CombinerObjective getCombinerObjective(unsigned Pattern) const override
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
bool isAsCheapAsAMove(const MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyLikeInstrImpl(const MachineInstr &MI) const override
static void suppressLdStPair(MachineInstr &MI)
Hint that pairing the given load or store is unprofitable.
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isPreLd(const MachineInstr &MI)
Returns whether the instruction is a pre-indexed load.
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, unsigned Opcode, llvm::ArrayRef< unsigned > Indices) const
bool optimizeCondBranch(MachineInstr &MI) const override
Replace csincr-branch sequence by simple conditional branch.
static int getMemScale(unsigned Opc)
Scaling factor for (scaled or unscaled) load or store.
bool isCandidateToMergeOrPair(const MachineInstr &MI) const
Return true if this is a load/store that can be potentially paired/merged.
MCInst getNop() const override
static const MachineOperand & getLdStBaseOp(const MachineInstr &MI)
Returns the base register operand of a load/store.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
Definition ArrayRef.h:40
A debug info location.
Definition DebugLoc.h:123
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MachineInstrBundleIterator< MachineInstr > iterator
Representation of each machine instruction.
Flags
Flags values. These may be or'd together.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element type.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
virtual MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const
Target-dependent implementation for foldMemoryOperand.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static const uint64_t InstrFlagIsWhile
static const uint64_t InstrFlagIsPTestLike
int32_t getSVERevInstr(uint32_t Opcode)
int32_t getSMEPseudoMap(uint32_t Opcode)
int32_t getSVENonRevInstr(uint32_t Opcode)
int32_t getSVEPseudoMap(uint32_t Opcode)
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
static bool isCondBranchOpcode(int Opc)
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
static bool isPTrueOpcode(unsigned Opc)
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
static bool isIndirectBranchOpcode(int Opc)
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
AArch64FrameOffsetStatus
Use to report the frame offset status in isAArch64FrameOffsetLegal.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key, or its *Z variant that doesn't take a discriminator operand, using zero instead.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isIndirectCallOpcode(unsigned Opc)
AArch64MachineCombinerPattern
@ MULSUBv8i16_OP2
@ FMULv4i16_indexed_OP1
@ FMLSv1i32_indexed_OP2
@ MULSUBv2i32_indexed_OP1
@ FMLAv2i32_indexed_OP2
@ MULADDv4i16_indexed_OP2
@ FMLAv1i64_indexed_OP1
@ MULSUBv16i8_OP1
@ FMLAv8i16_indexed_OP2
@ FMULv2i32_indexed_OP1
@ MULSUBv8i16_indexed_OP2
@ FMLAv1i64_indexed_OP2
@ MULSUBv4i16_indexed_OP2
@ FMLAv1i32_indexed_OP1
@ FMLAv2i64_indexed_OP2
@ FMLSv8i16_indexed_OP1
@ MULSUBv2i32_OP1
@ FMULv4i16_indexed_OP2
@ MULSUBv4i32_indexed_OP2
@ FMULv2i64_indexed_OP2
@ FMLAv4i32_indexed_OP1
@ MULADDv4i16_OP2
@ FMULv8i16_indexed_OP2
@ MULSUBv4i16_OP1
@ MULADDv4i32_OP2
@ MULADDv2i32_OP2
@ MULADDv16i8_OP2
@ FMLSv4i16_indexed_OP1
@ MULADDv16i8_OP1
@ FMLAv2i64_indexed_OP1
@ FMLAv1i32_indexed_OP2
@ FMLSv2i64_indexed_OP2
@ MULADDv2i32_OP1
@ MULADDv4i32_OP1
@ MULADDv2i32_indexed_OP1
@ MULSUBv16i8_OP2
@ MULADDv4i32_indexed_OP1
@ MULADDv2i32_indexed_OP2
@ FMLAv4i16_indexed_OP2
@ MULSUBv8i16_OP1
@ FMULv2i32_indexed_OP2
@ FMLSv2i32_indexed_OP2
@ FMLSv4i32_indexed_OP1
@ FMULv2i64_indexed_OP1
@ MULSUBv4i16_OP2
@ FMLSv4i16_indexed_OP2
@ FMLAv2i32_indexed_OP1
@ FMLSv2i32_indexed_OP1
@ FMLAv8i16_indexed_OP1
@ MULSUBv4i16_indexed_OP1
@ FMLSv4i32_indexed_OP2
@ MULADDv4i32_indexed_OP2
@ MULSUBv4i32_OP2
@ MULSUBv8i16_indexed_OP1
@ MULADDv8i16_OP2
@ MULSUBv2i32_indexed_OP2
@ FMULv4i32_indexed_OP2
@ FMLSv2i64_indexed_OP1
@ MULADDv4i16_OP1
@ FMLAv4i32_indexed_OP2
@ MULADDv8i16_indexed_OP1
@ FMULv4i32_indexed_OP1
@ FMLAv4i16_indexed_OP1
@ FMULv8i16_indexed_OP1
@ MULADDv8i16_OP1
@ MULSUBv4i32_indexed_OP1
@ MULSUBv4i32_OP1
@ FMLSv8i16_indexed_OP2
@ MULADDv8i16_indexed_OP2
@ MULSUBv2i32_OP2
@ FMLSv1i64_indexed_OP2
@ MULADDv4i16_indexed_OP1
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
std::optional< UsedNZCV > examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr, const TargetRegisterInfo &TRI, SmallVectorImpl< MachineInstr * > *CCUseInstrs=nullptr)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA, std::optional< int64_t > IncomingVGOffsetFromDefCFA)
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn't take a discriminator operand, using zero instead.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static const MachineMemOperand::Flags MOSuppressPair
bool optimizeTerminators(MachineBasicBlock *MBB, const TargetInstrInfo &TII)
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI, const MachineInstr &UseMI, const TargetRegisterInfo *TRI)
Return true if there is an instruction /after/ DefMI and before UseMI which either reads or clobbers NZCV.
static const MachineMemOperand::Flags MOStridedAccess
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn't take a discriminator operand, using zero instead.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
UsedNZCV & operator|=(const UsedNZCV &UsedFlags)
UsedNZCV()=default
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.