LLVM  15.0.0git
AArch64RegisterInfo.cpp
1 //===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the AArch64 implementation of the TargetRegisterInfo
10 // class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64RegisterInfo.h"
15 #include "AArch64FrameLowering.h"
16 #include "AArch64InstrInfo.h"
17 #include "AArch64MachineFunctionInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "MCTargetDesc/AArch64AddressingModes.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/BinaryFormat/Dwarf.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterScavenging.h"
27 #include "llvm/CodeGen/TargetFrameLowering.h"
28 #include "llvm/IR/DebugInfoMetadata.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/Support/raw_ostream.h"
32 #include "llvm/Target/TargetOptions.h"
33 
34 using namespace llvm;
35 
36 #define GET_REGINFO_TARGET_DESC
37 #include "AArch64GenRegisterInfo.inc"
38 
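// Note (illustrative, derived from how TableGen register-info backends work):
// the generated AArch64GenRegisterInfo.inc included above provides, among
// other things, the CSR_*_SaveList arrays and CSR_*_RegMask register masks
// that the callee-saved-register queries below hand out.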
39 AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
40  : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
41  AArch64_MC::initLLVMToCVRegMapping(this);
42 }
43 
44 /// Return whether the register needs a CFI entry. Not all unwinders may know
45 /// about SVE registers, so we assume the lowest common denominator, i.e. the
46 /// callee-saves required by the base ABI. For the SVE registers z8-z15 only the
47 /// lower 64-bits (d8-d15) need to be saved. The lower 64-bits subreg is
48 /// returned in \p RegToUseForCFI.
49 bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
50  unsigned &RegToUseForCFI) const {
51  if (AArch64::PPRRegClass.contains(Reg))
52  return false;
53 
54  if (AArch64::ZPRRegClass.contains(Reg)) {
55  RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
56  for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
57  if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
58  return true;
59  }
60  return false;
61  }
62 
63  RegToUseForCFI = Reg;
64  return true;
65 }
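// Worked example (illustrative): for the callee-saved SVE register z9, the
// loop above finds its 64-bit sub-register d9 in CSR_AArch64_AAPCS_SaveList
// and reports it via RegToUseForCFI, so the emitted CFI entry covers only the
// low 64 bits; a predicate register such as p5 matches the PPR class check
// and gets no CFI entry at all.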
66 
67 bool AArch64RegisterInfo::hasSVEArgsOrReturn(const MachineFunction *MF) {
68  const Function &F = MF->getFunction();
69  return isa<ScalableVectorType>(F.getReturnType()) ||
70  any_of(F.args(), [](const Argument &Arg) {
71  return isa<ScalableVectorType>(Arg.getType());
72  });
73 }
74 
75 const MCPhysReg *
76 AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
77  assert(MF && "Invalid MachineFunction pointer.");
78 
79  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
80  // GHC set of callee saved regs is empty as all those regs are
81  // used for passing STG regs around
82  return CSR_AArch64_NoRegs_SaveList;
83  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
84  return CSR_AArch64_AllRegs_SaveList;
85 
86  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
87  // lists depending on that will need to have their Darwin variant as well.
88  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
89  return getDarwinCalleeSavedRegs(MF);
90 
91  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
92  return CSR_Win_AArch64_CFGuard_Check_SaveList;
93  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
94  return CSR_Win_AArch64_AAPCS_SaveList;
95  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
96  return CSR_AArch64_AAVPCS_SaveList;
97  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
98  return CSR_AArch64_SVE_AAPCS_SaveList;
99  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
100  ->supportSwiftError() &&
101  MF->getFunction().getAttributes().hasAttrSomewhere(
102  Attribute::SwiftError))
103  return CSR_AArch64_AAPCS_SwiftError_SaveList;
104  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
105  return CSR_AArch64_AAPCS_SwiftTail_SaveList;
106  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
107  return CSR_AArch64_RT_MostRegs_SaveList;
108  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
109  // This is for OSes other than Windows; Windows is a separate case further
110  // above.
111  return CSR_AArch64_AAPCS_X18_SaveList;
112  if (hasSVEArgsOrReturn(MF))
113  return CSR_AArch64_SVE_AAPCS_SaveList;
114  return CSR_AArch64_AAPCS_SaveList;
115 }
116 
117 const MCPhysReg *
118 AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
119  assert(MF && "Invalid MachineFunction pointer.");
120  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
121  "Invalid subtarget for getDarwinCalleeSavedRegs");
122 
123  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
124  report_fatal_error(
125  "Calling convention CFGuard_Check is unsupported on Darwin.");
126  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
127  return CSR_Darwin_AArch64_AAVPCS_SaveList;
128  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
129  report_fatal_error(
130  "Calling convention SVE_VectorCall is unsupported on Darwin.");
131  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
132  return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
133  ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
134  : CSR_Darwin_AArch64_CXX_TLS_SaveList;
135  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
136  ->supportSwiftError() &&
137  MF->getFunction().getAttributes().hasAttrSomewhere(
138  Attribute::SwiftError))
139  return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
140  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
141  return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
142  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
143  return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
144  return CSR_Darwin_AArch64_AAPCS_SaveList;
145 }
146 
147 const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
148  const MachineFunction *MF) const {
149  assert(MF && "Invalid MachineFunction pointer.");
150  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
151  MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
152  return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
153  return nullptr;
154 }
155 
156 void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
157  MachineFunction &MF) const {
158  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
159  SmallVector<MCPhysReg, 32> UpdatedCSRs;
160  for (const MCPhysReg *I = CSRs; *I; ++I)
161  UpdatedCSRs.push_back(*I);
162 
163  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
164  if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
165  UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
166  }
167  }
168  // Register lists are zero-terminated.
169  UpdatedCSRs.push_back(0);
170  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
171 }
172 
173 const TargetRegisterClass *
174 AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
175  unsigned Idx) const {
176  // edge case for GPR/FPR register classes
177  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
178  return &AArch64::FPR32RegClass;
179  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
180  return &AArch64::FPR64RegClass;
181 
182  // Forward to TableGen's default version.
183  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
184 }
185 
186 const uint32_t *
187 AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
188  CallingConv::ID CC) const {
189  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
190  "Invalid subtarget for getDarwinCallPreservedMask");
191 
192  if (CC == CallingConv::CXX_FAST_TLS)
193  return CSR_Darwin_AArch64_CXX_TLS_RegMask;
194  if (CC == CallingConv::AArch64_VectorCall)
195  return CSR_Darwin_AArch64_AAVPCS_RegMask;
196  if (CC == CallingConv::AArch64_SVE_VectorCall)
197  report_fatal_error(
198  "Calling convention SVE_VectorCall is unsupported on Darwin.");
199  if (CC == CallingConv::CFGuard_Check)
200  report_fatal_error(
201  "Calling convention CFGuard_Check is unsupported on Darwin.");
202 
203  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
204  ->supportSwiftError() &&
205  MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
206  return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
207  if (CC == CallingConv::SwiftTail)
208  return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
209  if (CC == CallingConv::PreserveMost)
210  return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
211  return CSR_Darwin_AArch64_AAPCS_RegMask;
212 }
213 
214 const uint32_t *
215 AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
216  CallingConv::ID CC) const {
217  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
218  if (CC == CallingConv::GHC)
219  // This is academic because all GHC calls are (supposed to be) tail calls
220  return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
221  if (CC == CallingConv::AnyReg)
222  return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
223 
224  // All the following calling conventions are handled differently on Darwin.
225  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
226  if (SCS)
227  report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
228  return getDarwinCallPreservedMask(MF, CC);
229  }
230 
231  if (CC == CallingConv::AArch64_VectorCall)
232  return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
233  if (CC == CallingConv::AArch64_SVE_VectorCall)
234  return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
235  : CSR_AArch64_SVE_AAPCS_RegMask;
236  if (CC == CallingConv::CFGuard_Check)
237  return CSR_Win_AArch64_CFGuard_Check_RegMask;
238  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
239  ->supportSwiftError() &&
240  MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
241  return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
242  : CSR_AArch64_AAPCS_SwiftError_RegMask;
243  if (CC == CallingConv::SwiftTail) {
244  if (SCS)
245  report_fatal_error("ShadowCallStack attribute not supported with swifttail");
246  return CSR_AArch64_AAPCS_SwiftTail_RegMask;
247  }
248  if (CC == CallingConv::PreserveMost)
249  return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
250  : CSR_AArch64_RT_MostRegs_RegMask;
251  else
252  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
253 }
254 
255 const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
256  const MachineFunction &MF) const {
257  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
258  return CSR_AArch64_AAPCS_RegMask;
259 
260  return nullptr;
261 }
262 
263 const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
264  if (TT.isOSDarwin())
265  return CSR_Darwin_AArch64_TLS_RegMask;
266 
267  assert(TT.isOSBinFormatELF() && "Invalid target");
268  return CSR_AArch64_TLS_ELF_RegMask;
269 }
270 
271 void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
272  const uint32_t **Mask) const {
273  uint32_t *UpdatedMask = MF.allocateRegMask();
274  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
275  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
276 
277  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
278  if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
279  for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
280  this, true);
281  SubReg.isValid(); ++SubReg) {
282  // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
283  // register mask.
284  UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
285  }
286  }
287  }
288  *Mask = UpdatedMask;
289 }
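// A short note on the bit arithmetic above: a register mask is an array of
// uint32_t with one bit per physical register, so register number R lands in
// word R / 32 at bit R % 32. Because the MCSubRegIterator is constructed with
// IncludeSelf set, the custom callee-saved X register itself and its W
// sub-register both get their bits set.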
290 
291 const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
292  return CSR_AArch64_NoRegs_RegMask;
293 }
294 
295 const uint32_t *
296 AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
297  CallingConv::ID CC) const {
298  // This should return a register mask that is the same as that returned by
299  // getCallPreservedMask but that additionally preserves the register used for
300  // the first i64 argument (which must also be the register used to return a
301  // single i64 return value)
302  //
303  // In case that the calling convention does not use the same register for
304  // both, the function should return NULL (does not currently apply)
305  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
306  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
307  return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
308  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
309 }
310 
311 const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
312  return CSR_AArch64_StackProbe_Windows_RegMask;
313 }
314 
315 BitVector
316 AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
317  const AArch64FrameLowering *TFI = getFrameLowering(MF);
318 
319  // FIXME: avoid re-calculating this every time.
320  BitVector Reserved(getNumRegs());
321  markSuperRegs(Reserved, AArch64::WSP);
322  markSuperRegs(Reserved, AArch64::WZR);
323 
324  if (TFI->hasFP(MF) || TT.isOSDarwin())
325  markSuperRegs(Reserved, AArch64::W29);
326 
327  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
328  if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
329  markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
330  }
331 
332  if (hasBasePointer(MF))
333  markSuperRegs(Reserved, AArch64::W19);
334 
335  // SLH uses register W16/X16 as the taint register.
336  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
337  markSuperRegs(Reserved, AArch64::W16);
338 
339  assert(checkAllSuperRegsMarked(Reserved));
340  return Reserved;
341 }
342 
343 bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
344  MCRegister Reg) const {
345  return getReservedRegs(MF)[Reg];
346 }
347 
348 bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
349  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
350  return isReservedReg(MF, r);
351  });
352 }
353 
354 void AArch64RegisterInfo::emitReservedArgRegCallError(
355  const MachineFunction &MF) const {
356  const Function &F = MF.getFunction();
357  F.getContext().diagnose(DiagnosticInfoUnsupported{F, ("AArch64 doesn't support"
358  " function calls if any of the argument registers is reserved.")});
359 }
360 
361 bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
362  MCRegister PhysReg) const {
363  return !isReservedReg(MF, PhysReg);
364 }
365 
366 bool AArch64RegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
367  return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
368 }
369 
370 const TargetRegisterClass *
371 AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
372  unsigned Kind) const {
373  return &AArch64::GPR64spRegClass;
374 }
375 
376 const TargetRegisterClass *
377 AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
378  if (RC == &AArch64::CCRRegClass)
379  return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
380  return RC;
381 }
382 
383 unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
384 
385 bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
386  const MachineFrameInfo &MFI = MF.getFrameInfo();
387 
388  // In the presence of variable sized objects or funclets, if the fixed stack
389  // size is large enough that referencing from the FP won't result in things
390  // being in range relatively often, we can use a base pointer to allow access
391  // from the other direction like the SP normally works.
392  //
393  // Furthermore, if both variable sized objects are present, and the
394  // stack needs to be dynamically re-aligned, the base pointer is the only
395  // reliable way to reference the locals.
396  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
397  if (hasStackRealignment(MF))
398  return true;
399 
400  if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
401  const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
402  // Frames that have variable sized objects and scalable SVE objects,
403  // should always use a basepointer.
404  if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
405  return true;
406  }
407 
408  // Conservatively estimate whether the negative offset from the frame
409  // pointer will be sufficient to reach. If a function has a smallish
410  // frame, it's less likely to have lots of spills and callee saved
411  // space, so it's all more likely to be within range of the frame pointer.
412  // If it's wrong, we'll materialize the constant and still get to the
413  // object; it's just suboptimal. Negative offsets use the unscaled
414  // load/store instructions, which have a 9-bit signed immediate.
415  return MFI.getLocalFrameSize() >= 256;
416  }
417 
418  return false;
419 }
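// Rough illustration of the policy above (example numbers only): a function
// that uses alloca() and has, say, 512 bytes of locals keeps X19 as a base
// pointer, whereas the same function with only 64 bytes of locals is assumed
// to be reachable through negative frame-pointer offsets, which fit the 9-bit
// signed immediate of the unscaled load/store forms mentioned in the comment.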
420 
421 Register
422 AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
423  const AArch64FrameLowering *TFI = getFrameLowering(MF);
424  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
425 }
426 
427 bool AArch64RegisterInfo::requiresRegisterScavenging(
428  const MachineFunction &MF) const {
429  return true;
430 }
431 
432 bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
433  const MachineFunction &MF) const {
434  return true;
435 }
436 
437 bool
438 AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
439  // This function indicates whether the emergency spillslot should be placed
440  // close to the beginning of the stackframe (closer to FP) or the end
441  // (closer to SP).
442  //
443  // The beginning works most reliably if we have a frame pointer.
444  // In the presence of any non-constant space between FP and locals,
445  // (e.g. in case of stack realignment or a scalable SVE area), it is
446  // better to use SP or BP.
447  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
448  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
449  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
450  AFI->hasCalculatedStackSizeSVE()) &&
451  "Expected SVE area to be calculated by this point");
452  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
453 }
454 
455 bool AArch64RegisterInfo::requiresFrameIndexScavenging(
456  const MachineFunction &MF) const {
457  return true;
458 }
459 
460 bool
461 AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
462  const MachineFrameInfo &MFI = MF.getFrameInfo();
463  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
464  return true;
465  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
466 }
467 
468 /// needsFrameBaseReg - Returns true if the instruction's frame index
469 /// reference would be better served by a base register other than FP
470 /// or SP. Used by LocalStackFrameAllocation to determine which frame index
471 /// references it should create new base registers for.
472 bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
473  int64_t Offset) const {
474  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
475  assert(i < MI->getNumOperands() &&
476  "Instr doesn't have FrameIndex operand!");
477 
478  // It's the load/store FI references that cause issues, as it can be difficult
479  // to materialize the offset if it won't fit in the literal field. Estimate
480  // based on the size of the local frame and some conservative assumptions
481  // about the rest of the stack frame (note, this is pre-regalloc, so
482  // we don't know everything for certain yet) whether this offset is likely
483  // to be out of range of the immediate. Return true if so.
484 
485  // We only generate virtual base registers for loads and stores, so
486  // return false for everything else.
487  if (!MI->mayLoad() && !MI->mayStore())
488  return false;
489 
490  // Without a virtual base register, if the function has variable sized
491  // objects, all fixed-size local references will be via the frame pointer,
492  // Approximate the offset and see if it's legal for the instruction.
493  // Note that the incoming offset is based on the SP value at function entry,
494  // so it'll be negative.
495  MachineFunction &MF = *MI->getParent()->getParent();
496  const AArch64FrameLowering *TFI = getFrameLowering(MF);
497  MachineFrameInfo &MFI = MF.getFrameInfo();
498 
499  // Estimate an offset from the frame pointer.
500  // Conservatively assume all GPR callee-saved registers get pushed.
501  // FP, LR, X19-X28, D8-D15. 64-bits each.
502  int64_t FPOffset = Offset - 16 * 20;
503  // Estimate an offset from the stack pointer.
504  // The incoming offset is relating to the SP at the start of the function,
505  // but when we access the local it'll be relative to the SP after local
506  // allocation, so adjust our SP-relative offset by that allocation size.
507  Offset += MFI.getLocalFrameSize();
508  // Assume that we'll have at least some spill slots allocated.
509  // FIXME: This is a total SWAG number. We should run some statistics
510  // and pick a real one.
511  Offset += 128; // 128 bytes of spill slots
512 
513  // If there is a frame pointer, try using it.
514  // The FP is only available if there is no dynamic realignment. We
515  // don't know for sure yet whether we'll need that, so we guess based
516  // on whether there are any local variables that would trigger it.
517  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
518  return false;
519 
520  // If we can reference via the stack pointer or base pointer, try that.
521  // FIXME: This (and the code that resolves the references) can be improved
522  // to only disallow SP relative references in the live range of
523  // the VLA(s). In practice, it's unclear how much difference that
524  // would make, but it may be worth doing.
525  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
526  return false;
527 
528  // If even offset 0 is illegal, we don't want a virtual base register.
529  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
530  return false;
531 
532  // The offset likely isn't legal; we want to allocate a virtual base register.
533  return true;
534 }
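// Worked example of the estimate above (illustrative numbers): for an incoming
// SP-relative offset of -8 and a 1024-byte local frame, the FP-relative guess
// is -8 - 320 = -328 and the SP-relative guess is -8 + 1024 + 128 = 1144;
// whether either of those is encodable for this particular load/store decides
// if a virtual base register is requested.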
535 
536 bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
537  Register BaseReg,
538  int64_t Offset) const {
539  assert(MI && "Unable to get the legal offset for nil instruction.");
540  StackOffset SaveOffset = StackOffset::getFixed(Offset);
541  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
542 }
543 
544 /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
545 /// at the beginning of the basic block.
546 Register
547 AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
548  int FrameIdx,
549  int64_t Offset) const {
550  MachineBasicBlock::iterator Ins = MBB->begin();
551  DebugLoc DL; // Defaults to "unknown"
552  if (Ins != MBB->end())
553  DL = Ins->getDebugLoc();
554  const MachineFunction &MF = *MBB->getParent();
555  const AArch64InstrInfo *TII =
556  MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
557  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
558  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
559  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
560  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
561  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
562 
563  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
564  .addFrameIndex(FrameIdx)
565  .addImm(Offset)
566  .addImm(Shifter);
567 
568  return BaseReg;
569 }
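// The instruction built above is roughly "%base:gpr64sp = ADDXri %stack.N,
// Offset, 0", i.e. an ADD of an unshifted 12-bit immediate whose frame-index
// operand is later resolved by eliminateFrameIndex / rewriteAArch64FrameIndex.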
570 
571 void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
572  int64_t Offset) const {
573  // ARM doesn't need the general 64-bit offsets
574  StackOffset Off = StackOffset::getFixed(Offset);
575 
576  unsigned i = 0;
577  while (!MI.getOperand(i).isFI()) {
578  ++i;
579  assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
580  }
581 
582  const MachineFunction *MF = MI.getParent()->getParent();
583  const AArch64InstrInfo *TII =
584  MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
585  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
586  assert(Done && "Unable to resolve frame index!");
587  (void)Done;
588 }
589 
590 // Create a scratch register for the frame index elimination in an instruction.
591 // This function has special handling of stack tagging loop pseudos, in which
592 // case it can also change the instruction opcode.
593 static Register
594 createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
595  const AArch64InstrInfo *TII) {
596  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
597  // replace the instruction with the writeback variant because it will now
598  // satisfy the operand constraints for it.
599  Register ScratchReg;
600  if (MI.getOpcode() == AArch64::STGloop ||
601  MI.getOpcode() == AArch64::STZGloop) {
602  assert(FIOperandNum == 3 &&
603  "Wrong frame index operand for STGloop/STZGloop");
604  unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
605  : AArch64::STZGloop_wback;
606  ScratchReg = MI.getOperand(1).getReg();
607  MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
608  MI.setDesc(TII->get(Op));
609  MI.tieOperands(1, 3);
610  } else {
611  ScratchReg =
612  MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
613  MI.getOperand(FIOperandNum)
614  .ChangeToRegister(ScratchReg, false, false, true);
615  }
616  return ScratchReg;
617 }
618 
619 void AArch64RegisterInfo::getOffsetOpcodes(
620  const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
621  // The smallest scalable element supported by scaled SVE addressing
622  // modes are predicates, which are 2 scalable bytes in size. So the scalable
623  // byte offset must always be a multiple of 2.
624  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
625 
626  // Add fixed-sized offset using existing DIExpression interface.
627  DIExpression::appendOffset(Ops, Offset.getFixed());
628 
629  unsigned VG = getDwarfRegNum(AArch64::VG, true);
630  int64_t VGSized = Offset.getScalable() / 2;
631  if (VGSized > 0) {
632  Ops.push_back(dwarf::DW_OP_constu);
633  Ops.push_back(VGSized);
634  Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
635  Ops.push_back(dwarf::DW_OP_mul);
636  Ops.push_back(dwarf::DW_OP_plus);
637  } else if (VGSized < 0) {
638  Ops.push_back(dwarf::DW_OP_constu);
639  Ops.push_back(-VGSized);
640  Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
641  Ops.push_back(dwarf::DW_OP_mul);
642  Ops.push_back(dwarf::DW_OP_minus);
643  }
644 }
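// Example of the DWARF expression produced above: a StackOffset of 16 fixed
// bytes plus 32 scalable bytes (VGSized = 16) appends roughly
//   DW_OP_plus_uconst 16, DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus
// i.e. address = base + 16 + 16 * VG, where VG is the number of 64-bit
// granules in a scalable vector register.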
645 
646 void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
647  int SPAdj, unsigned FIOperandNum,
648  RegScavenger *RS) const {
649  assert(SPAdj == 0 && "Unexpected");
650 
651  MachineInstr &MI = *II;
652  MachineBasicBlock &MBB = *MI.getParent();
653  MachineFunction &MF = *MBB.getParent();
654  const MachineFrameInfo &MFI = MF.getFrameInfo();
655  const AArch64InstrInfo *TII =
656  MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
657  const AArch64FrameLowering *TFI = getFrameLowering(MF);
658  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
659  bool Tagged =
660  MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
661  Register FrameReg;
662 
663  // Special handling of dbg_value, stackmap patchpoint statepoint instructions.
664  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
665  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
666  MI.getOpcode() == TargetOpcode::STATEPOINT) {
667  StackOffset Offset =
668  TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
669  /*PreferFP=*/true,
670  /*ForSimm=*/false);
671  Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
672  MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
673  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
674  return;
675  }
676 
677  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
678  MachineOperand &FI = MI.getOperand(FIOperandNum);
679  StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
680  assert(!Offset.getScalable() &&
681  "Frame offsets with a scalable component are not supported");
682  FI.ChangeToImmediate(Offset.getFixed());
683  return;
684  }
685 
686  StackOffset Offset;
687  if (MI.getOpcode() == AArch64::TAGPstack) {
688  // TAGPstack must use the virtual frame register in its 3rd operand.
689  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
690  FrameReg = MI.getOperand(3).getReg();
691  Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
692  AFI->getTaggedBasePointerOffset());
693  } else if (Tagged) {
694  StackOffset SPOffset = StackOffset::getFixed(
695  MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
696  if (MFI.hasVarSizedObjects() ||
697  isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
698  AArch64FrameOffsetCanUpdate) {
699  // Can't update to SP + offset in place. Precalculate the tagged pointer
700  // in a scratch register.
701  Offset = TFI->resolveFrameIndexReference(
702  MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
703  Register ScratchReg =
704  MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
705  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
706  TII);
707  BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
708  .addReg(ScratchReg)
709  .addReg(ScratchReg)
710  .addImm(0);
711  MI.getOperand(FIOperandNum)
712  .ChangeToRegister(ScratchReg, false, false, true);
713  return;
714  }
715  FrameReg = AArch64::SP;
716  Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
717  (int64_t)MFI.getStackSize());
718  } else {
719  Offset = TFI->resolveFrameIndexReference(
720  MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
721  }
722 
723  // Modify MI as necessary to handle as much of 'Offset' as possible
724  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
725  return;
726 
727  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
728  "Emergency spill slot is out of reach");
729 
730  // If we get here, the immediate doesn't fit into the instruction. We folded
731  // as much as possible above. Handle the rest, providing a register that is
732  // SP+LargeImm.
733  Register ScratchReg =
734  createScratchRegisterForInstruction(MI, FIOperandNum, TII);
735  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
736 }
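// Sketch of the common path above: a plain LDRXui/STRXui frame access is
// usually folded in place by rewriteAArch64FrameIndex; only when the final
// offset does not fit the instruction's immediate field is the remainder
// materialized into a scratch GPR64 with emitFrameOffset, and the frame-index
// operand is then switched to that scratch register (see
// createScratchRegisterForInstruction).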
737 
738 unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
739  MachineFunction &MF) const {
740  const AArch64FrameLowering *TFI = getFrameLowering(MF);
741 
742  switch (RC->getID()) {
743  default:
744  return 0;
745  case AArch64::GPR32RegClassID:
746  case AArch64::GPR32spRegClassID:
747  case AArch64::GPR32allRegClassID:
748  case AArch64::GPR64spRegClassID:
749  case AArch64::GPR64allRegClassID:
750  case AArch64::GPR64RegClassID:
751  case AArch64::GPR32commonRegClassID:
752  case AArch64::GPR64commonRegClassID:
753  return 32 - 1 // XZR/SP
754  - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
755  - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
756  - hasBasePointer(MF); // X19
757  case AArch64::FPR8RegClassID:
758  case AArch64::FPR16RegClassID:
759  case AArch64::FPR32RegClassID:
760  case AArch64::FPR64RegClassID:
761  case AArch64::FPR128RegClassID:
762  return 32;
763 
764  case AArch64::MatrixIndexGPR32_12_15RegClassID:
765  return 4;
766 
767  case AArch64::DDRegClassID:
768  case AArch64::DDDRegClassID:
769  case AArch64::DDDDRegClassID:
770  case AArch64::QQRegClassID:
771  case AArch64::QQQRegClassID:
772  case AArch64::QQQQRegClassID:
773  return 32;
774 
775  case AArch64::FPR128_loRegClassID:
776  case AArch64::FPR64_loRegClassID:
777  case AArch64::FPR16_loRegClassID:
778  return 16;
779  }
780 }
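// Worked example for the GPR classes above: with a frame pointer, no reserved
// X registers (e.g. via -ffixed-xN) and no base pointer, the limit is
// 32 - 1 (XZR/SP) - 1 (FP) - 0 - 0 = 30 registers; reserving X18 and needing
// X19 as a base pointer would lower it to 28.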
781 
782 unsigned AArch64RegisterInfo::getLocalAddressRegister(
783  const MachineFunction &MF) const {
784  const auto &MFI = MF.getFrameInfo();
785  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
786  return AArch64::SP;
787  else if (hasStackRealignment(MF))
788  return getBaseRegister();
789  return getFrameRegister(MF);
790 }
791 
792 /// SrcRC and DstRC will be morphed into NewRC if this returns true
793 bool AArch64RegisterInfo::shouldCoalesce(
794  MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
795  const TargetRegisterClass *DstRC, unsigned DstSubReg,
796  const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
797  if (MI->isCopy() &&
798  ((DstRC->getID() == AArch64::GPR64RegClassID) ||
799  (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
800  MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
801  // Do not coalesce in the case of a 32-bit subregister copy
802  // which implements a 32 to 64 bit zero extension
803  // which relies on the upper 32 bits being zeroed.
804  return false;
805  return true;
806 }