LLVM  9.0.0svn
AArch64CallLowering.cpp
Go to the documentation of this file.
1 //===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 ///
13 //===----------------------------------------------------------------------===//
14 
#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
46 
47 #define DEBUG_TYPE "aarch64-call-lowering"
48 
49 using namespace llvm;
50 
52  : CallLowering(&TLI) {}
53 
54 namespace {
55 struct IncomingArgHandler : public CallLowering::ValueHandler {
56  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
57  CCAssignFn *AssignFn)
58  : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
59 
60  unsigned getStackAddress(uint64_t Size, int64_t Offset,
61  MachinePointerInfo &MPO) override {
62  auto &MFI = MIRBuilder.getMF().getFrameInfo();
63  int FI = MFI.CreateFixedObject(Size, Offset, true);
64  MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
65  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
66  MIRBuilder.buildFrameIndex(AddrReg, FI);
67  StackUsed = std::max(StackUsed, Size + Offset);
68  return AddrReg;
69  }
70 
71  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
72  CCValAssign &VA) override {
73  markPhysRegUsed(PhysReg);
74  switch (VA.getLocInfo()) {
75  default:
76  MIRBuilder.buildCopy(ValVReg, PhysReg);
77  break;
78  case CCValAssign::LocInfo::SExt:
79  case CCValAssign::LocInfo::ZExt:
80  case CCValAssign::LocInfo::AExt: {
81  auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
82  MIRBuilder.buildTrunc(ValVReg, Copy);
83  break;
84  }
85  }
86  }
87 
88  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
89  MachinePointerInfo &MPO, CCValAssign &VA) override {
90  // FIXME: Get alignment
91  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
93  1);
94  MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
95  }
96 
97  /// How the physical register gets marked varies between formal
98  /// parameters (it's a basic-block live-in), and a call instruction
99  /// (it's an implicit-def of the BL).
100  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
101 
102  bool isArgumentHandler() const override { return true; }
103 
104  uint64_t StackUsed;
105 };
106 
107 struct FormalArgHandler : public IncomingArgHandler {
108  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
109  CCAssignFn *AssignFn)
110  : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}
111 
112  void markPhysRegUsed(unsigned PhysReg) override {
113  MIRBuilder.getMBB().addLiveIn(PhysReg);
114  }
115 };
116 
117 struct CallReturnHandler : public IncomingArgHandler {
118  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
119  MachineInstrBuilder MIB, CCAssignFn *AssignFn)
120  : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
121 
122  void markPhysRegUsed(unsigned PhysReg) override {
123  MIB.addDef(PhysReg, RegState::Implicit);
124  }
125 
127 };
128 
129 struct OutgoingArgHandler : public CallLowering::ValueHandler {
130  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
131  MachineInstrBuilder MIB, CCAssignFn *AssignFn,
132  CCAssignFn *AssignFnVarArg)
133  : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
134  AssignFnVarArg(AssignFnVarArg), StackSize(0) {}
135 
136  unsigned getStackAddress(uint64_t Size, int64_t Offset,
137  MachinePointerInfo &MPO) override {
138  LLT p0 = LLT::pointer(0, 64);
139  LLT s64 = LLT::scalar(64);
140  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
141  MIRBuilder.buildCopy(SPReg, AArch64::SP);
142 
143  unsigned OffsetReg = MRI.createGenericVirtualRegister(s64);
144  MIRBuilder.buildConstant(OffsetReg, Offset);
145 
146  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
147  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
148 
149  MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
150  return AddrReg;
151  }
152 
153  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
154  CCValAssign &VA) override {
155  MIB.addUse(PhysReg, RegState::Implicit);
156  unsigned ExtReg = extendRegister(ValVReg, VA);
157  MIRBuilder.buildCopy(PhysReg, ExtReg);
158  }
159 
160  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
161  MachinePointerInfo &MPO, CCValAssign &VA) override {
162  if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
163  Size = VA.getLocVT().getSizeInBits() / 8;
164  ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(Size * 8), ValVReg)
165  ->getOperand(0)
166  .getReg();
167  }
168  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
169  MPO, MachineMemOperand::MOStore, Size, 1);
170  MIRBuilder.buildStore(ValVReg, Addr, *MMO);
171  }
172 
173  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
174  CCValAssign::LocInfo LocInfo,
176  CCState &State) override {
177  bool Res;
178  if (Info.IsFixed)
179  Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
180  else
181  Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
182 
183  StackSize = State.getNextStackOffset();
184  return Res;
185  }
186 
188  CCAssignFn *AssignFnVarArg;
189  uint64_t StackSize;
190 };
191 } // namespace
192 
193 void AArch64CallLowering::splitToValueTypes(
194  const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
195  const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
196  const SplitArgTy &PerformArgSplit) const {
197  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
198  LLVMContext &Ctx = OrigArg.Ty->getContext();
199 
200  if (OrigArg.Ty->isVoidTy())
201  return;
202 
203  SmallVector<EVT, 4> SplitVTs;
205  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
206 
207  if (SplitVTs.size() == 1) {
208  // No splitting to do, but we want to replace the original type (e.g. [1 x
209  // double] -> double).
210  SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx),
211  OrigArg.Flags, OrigArg.IsFixed);
212  return;
213  }
214 
215  unsigned FirstRegIdx = SplitArgs.size();
216  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
217  OrigArg.Ty, CallConv, false);
218  for (auto SplitVT : SplitVTs) {
219  Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
220  SplitArgs.push_back(
222  SplitTy, OrigArg.Flags, OrigArg.IsFixed});
223  if (NeedsRegBlock)
224  SplitArgs.back().Flags.setInConsecutiveRegs();
225  }
226 
227  SplitArgs.back().Flags.setInConsecutiveRegsLast();
228 
229  for (unsigned i = 0; i < Offsets.size(); ++i)
230  PerformArgSplit(SplitArgs[FirstRegIdx + i].Reg, Offsets[i] * 8);
231 }
232 
234  const Value *Val,
235  ArrayRef<unsigned> VRegs,
236  unsigned SwiftErrorVReg) const {
237  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
238  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
239  "Return value without a vreg");
240 
241  bool Success = true;
242  if (!VRegs.empty()) {
243  MachineFunction &MF = MIRBuilder.getMF();
244  const Function &F = MF.getFunction();
245 
246  MachineRegisterInfo &MRI = MF.getRegInfo();
247  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
248  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
249  auto &DL = F.getParent()->getDataLayout();
250  LLVMContext &Ctx = Val->getType()->getContext();
251 
252  SmallVector<EVT, 4> SplitEVTs;
253  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
254  assert(VRegs.size() == SplitEVTs.size() &&
255  "For each split Type there should be exactly one VReg.");
256 
257  SmallVector<ArgInfo, 8> SplitArgs;
259 
260  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
261  if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
262  LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
263  return false;
264  }
265 
266  unsigned CurVReg = VRegs[i];
267  ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
268  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
269 
270  // i1 is a special case because SDAG i1 true is naturally zero extended
271  // when widened using ANYEXT. We need to do it explicitly here.
272  if (MRI.getType(CurVReg).getSizeInBits() == 1) {
273  CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
274  } else {
275  // Some types will need extending as specified by the CC.
276  MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
277  if (EVT(NewVT) != SplitEVTs[i]) {
278  unsigned ExtendOp = TargetOpcode::G_ANYEXT;
280  Attribute::SExt))
281  ExtendOp = TargetOpcode::G_SEXT;
283  Attribute::ZExt))
284  ExtendOp = TargetOpcode::G_ZEXT;
285 
286  LLT NewLLT(NewVT);
287  LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
288  CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
289  // Instead of an extend, we might have a vector type which needs
290  // padding with more elements, e.g. <2 x half> -> <4 x half>.
291  if (NewVT.isVector()) {
292  if (OldLLT.isVector()) {
293  if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
294  // We don't handle VA types which are not exactly twice the
295  // size, but can easily be done in future.
296  if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
297  LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
298  return false;
299  }
300  auto Undef = MIRBuilder.buildUndef({OldLLT});
301  CurVReg =
302  MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef.getReg(0)})
303  .getReg(0);
304  } else {
305  // Just do a vector extend.
306  CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
307  .getReg(0);
308  }
309  } else if (NewLLT.getNumElements() == 2) {
310  // We need to pad a <1 x S> type to <2 x S>. Since we don't have
311  // <1 x S> vector types in GISel we use a build_vector instead
312  // of a vector merge/concat.
313  auto Undef = MIRBuilder.buildUndef({OldLLT});
314  CurVReg =
315  MIRBuilder
316  .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
317  .getReg(0);
318  } else {
319  LLVM_DEBUG(dbgs() << "Could not handle ret ty");
320  return false;
321  }
322  } else {
323  // A scalar extend.
324  CurVReg =
325  MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
326  }
327  }
328  }
329  if (CurVReg != CurArgInfo.Reg) {
330  CurArgInfo.Reg = CurVReg;
331  // Reset the arg flags after modifying CurVReg.
332  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
333  }
334  splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC,
335  [&](unsigned Reg, uint64_t Offset) {
336  MIRBuilder.buildExtract(Reg, CurVReg, Offset);
337  });
338  }
339 
340  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
341  Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
342  }
343 
344  if (SwiftErrorVReg) {
345  MIB.addUse(AArch64::X21, RegState::Implicit);
346  MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
347  }
348 
349  MIRBuilder.insertInstr(MIB);
350  return Success;
351 }
352 
354  const Function &F,
355  ArrayRef<unsigned> VRegs) const {
356  MachineFunction &MF = MIRBuilder.getMF();
357  MachineBasicBlock &MBB = MIRBuilder.getMBB();
358  MachineRegisterInfo &MRI = MF.getRegInfo();
359  auto &DL = F.getParent()->getDataLayout();
360 
361  SmallVector<ArgInfo, 8> SplitArgs;
362  unsigned i = 0;
363  for (auto &Arg : F.args()) {
364  if (DL.getTypeStoreSize(Arg.getType()) == 0)
365  continue;
366  ArgInfo OrigArg{VRegs[i], Arg.getType()};
367  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
368  bool Split = false;
369  LLT Ty = MRI.getType(VRegs[i]);
370  unsigned Dst = VRegs[i];
371 
372  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(),
373  [&](unsigned Reg, uint64_t Offset) {
374  if (!Split) {
375  Split = true;
376  Dst = MRI.createGenericVirtualRegister(Ty);
377  MIRBuilder.buildUndef(Dst);
378  }
379  unsigned Tmp = MRI.createGenericVirtualRegister(Ty);
380  MIRBuilder.buildInsert(Tmp, Dst, Reg, Offset);
381  Dst = Tmp;
382  });
383 
384  if (Dst != VRegs[i])
385  MIRBuilder.buildCopy(VRegs[i], Dst);
386  ++i;
387  }
388 
389  if (!MBB.empty())
390  MIRBuilder.setInstr(*MBB.begin());
391 
392  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
393  CCAssignFn *AssignFn =
394  TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
395 
396  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
397  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
398  return false;
399 
400  if (F.isVarArg()) {
402  // FIXME: we need to reimplement saveVarArgsRegisters from
403  // AArch64ISelLowering.
404  return false;
405  }
406 
407  // We currently pass all varargs at 8-byte alignment.
408  uint64_t StackOffset = alignTo(Handler.StackUsed, 8);
409 
410  auto &MFI = MIRBuilder.getMF().getFrameInfo();
412  FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
413  }
414 
415  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
416  if (Subtarget.hasCustomCallingConv())
418 
419  // Move back to the end of the basic block.
420  MIRBuilder.setMBB(MBB);
421 
422  return true;
423 }
424 
426  CallingConv::ID CallConv,
427  const MachineOperand &Callee,
428  const ArgInfo &OrigRet,
429  ArrayRef<ArgInfo> OrigArgs,
430  unsigned SwiftErrorVReg) const {
431  MachineFunction &MF = MIRBuilder.getMF();
432  const Function &F = MF.getFunction();
433  MachineRegisterInfo &MRI = MF.getRegInfo();
434  auto &DL = F.getParent()->getDataLayout();
435 
436  SmallVector<ArgInfo, 8> SplitArgs;
437  for (auto &OrigArg : OrigArgs) {
438  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv,
439  [&](unsigned Reg, uint64_t Offset) {
440  MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset);
441  });
442  // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
443  if (OrigArg.Ty->isIntegerTy(1))
444  SplitArgs.back().Flags.setZExt();
445  }
446 
447  // Find out which ABI gets to decide where things go.
448  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
449  CCAssignFn *AssignFnFixed =
450  TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
451  CCAssignFn *AssignFnVarArg =
452  TLI.CCAssignFnForCall(CallConv, /*IsVarArg=*/true);
453 
454  auto CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
455 
456  // Create a temporarily-floating call instruction so we can add the implicit
457  // uses of arg registers.
458  auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? AArch64::BLR
459  : AArch64::BL);
460  MIB.add(Callee);
461 
462  // Tell the call which registers are clobbered.
463  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
464  const uint32_t *Mask = TRI->getCallPreservedMask(MF, F.getCallingConv());
466  TRI->UpdateCustomCallPreservedMask(MF, &Mask);
467  MIB.addRegMask(Mask);
468 
469  if (TRI->isAnyArgRegReserved(MF))
470  TRI->emitReservedArgRegCallError(MF);
471 
472  // Do the actual argument marshalling.
473  SmallVector<unsigned, 8> PhysRegs;
474  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
475  AssignFnVarArg);
476  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
477  return false;
478 
479  // Now we can add the actual call instruction to the correct basic block.
480  MIRBuilder.insertInstr(MIB);
481 
482  // If Callee is a reg, since it is used by a target specific
483  // instruction, it must have a register class matching the
484  // constraint of that instruction.
485  if (Callee.isReg())
487  MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
488  *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));
489 
490  // Finally we can copy the returned value back into its virtual-register. In
491  // symmetry with the arugments, the physical register must be an
492  // implicit-define of the call instruction.
493  CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
494  if (OrigRet.Reg) {
495  SplitArgs.clear();
496 
497  SmallVector<uint64_t, 8> RegOffsets;
498  SmallVector<unsigned, 8> SplitRegs;
499  splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(),
500  [&](unsigned Reg, uint64_t Offset) {
501  RegOffsets.push_back(Offset);
502  SplitRegs.push_back(Reg);
503  });
504 
505  CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
506  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
507  return false;
508 
509  if (!RegOffsets.empty())
510  MIRBuilder.buildSequence(OrigRet.Reg, SplitRegs, RegOffsets);
511  }
512 
513  if (SwiftErrorVReg) {
514  MIB.addDef(AArch64::X21, RegState::Implicit);
515  MIRBuilder.buildCopy(SwiftErrorVReg, AArch64::X21);
516  }
517 
518  CallSeqStart.addImm(Handler.StackSize).addImm(0);
519  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
520  .addImm(Handler.StackSize)
521  .addImm(0);
522 
523  return true;
524 }
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:176
unsigned getReg(unsigned Idx) const
Get the register for the operand index.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string, with methods for querying it.
Definition: DataLayout.h:110
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0, unsigned Op1)
Build and insert Res = G_GEP Op0, Op1.
unsigned constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, const MachineOperand &RegMO, unsigned OpIdx)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:40
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool hasCustomCallingConv() const
bool isVector() const
Return true if this is a vector value type.
unsigned getReg() const
getReg - Returns the register number.
unsigned Reg
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, ArrayRef< ArgInfo > OrigArgs, unsigned SwiftErrorVReg) const override
This hook must be implemented to lower the given call instruction, including argument and return valu...
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1050
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
unsigned const TargetRegisterInfo * TRI
F(f)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
Definition: MathExtras.h:684
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert `Res0, ...
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
bool isVector() const
bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef< ArgInfo > Args, ValueHandler &Handler) const
Invoke Handler::assignArg on each of the given Args and then use Callback to move them to the assigne...
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:205
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
const MachineInstrBuilder & addUse(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:196
This file contains the simple types necessary to represent the attributes associated with functions a...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
LocInfo getLocInfo() const
unsigned getSizeInBits() const
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
const AArch64RegisterInfo * getRegisterInfo() const override
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:223
virtual const TargetInstrInfo * getInstrInfo() const
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:119
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned const MachineRegisterInfo * MRI
Machine Value Type.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
Helper class to build MachineInstr.
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, unsigned Op, unsigned Index)
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:288
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
Extended Value Type.
Definition: ValueTypes.h:33
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
size_t size() const
Definition: SmallVector.h:52
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:64
This class contains a discriminated union of information about pointers in memory operands...
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
The memory access writes data.
unsigned createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
CCState - This class holds information needed while lowering arguments and return values...
void buildSequence(unsigned Res, ArrayRef< unsigned > Ops, ArrayRef< uint64_t > Indices)
Build and insert instructions to put Ops together at the specified p Indices to form a larger registe...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:212
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
CCValAssign - Represent assignment of one arg/retval to a location.
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< unsigned > VRegs) const override
This hook must be implemented to lower the incoming (formal) arguments, described by Args...
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Function & getFunction() const
Return the LLVM function that this machine code represents.
This file declares the MachineIRBuilder class.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
#define Success
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< unsigned > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
The memory access always returns the same value (or traps).
uint32_t Size
Definition: Profile.cpp:46
This file describes how to lower LLVM calls to machine code calls.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:72
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:444
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E&#39;s largest value.
Definition: BitmaskEnum.h:80
AArch64CallLowering(const AArch64TargetLowering &TLI)
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
const MachineInstrBuilder & addDef(unsigned RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:415
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< unsigned > VRegs, unsigned SwiftErrorVReg) const override
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
iterator_range< arg_iterator > args()
Definition: Function.h:705
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143