LLVM  10.0.0svn
AArch64CallLowering.cpp
Go to the documentation of this file.
1 //===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "AArch64CallLowering.h"
16 #include "AArch64ISelLowering.h"
18 #include "AArch64Subtarget.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/CodeGen/Analysis.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/Type.h"
40 #include "llvm/IR/Value.h"
42 #include <algorithm>
43 #include <cassert>
44 #include <cstdint>
45 #include <iterator>
46 
47 #define DEBUG_TYPE "aarch64-call-lowering"
48 
49 using namespace llvm;
50 
52  : CallLowering(&TLI) {}
53 
54 namespace {
55 struct IncomingArgHandler : public CallLowering::ValueHandler {
56  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
57  CCAssignFn *AssignFn)
58  : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
59 
60  Register getStackAddress(uint64_t Size, int64_t Offset,
61  MachinePointerInfo &MPO) override {
62  auto &MFI = MIRBuilder.getMF().getFrameInfo();
63  int FI = MFI.CreateFixedObject(Size, Offset, true);
64  MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
66  MIRBuilder.buildFrameIndex(AddrReg, FI);
67  StackUsed = std::max(StackUsed, Size + Offset);
68  return AddrReg;
69  }
70 
71  void assignValueToReg(Register ValVReg, Register PhysReg,
72  CCValAssign &VA) override {
73  markPhysRegUsed(PhysReg);
74  switch (VA.getLocInfo()) {
75  default:
76  MIRBuilder.buildCopy(ValVReg, PhysReg);
77  break;
78  case CCValAssign::LocInfo::SExt:
79  case CCValAssign::LocInfo::ZExt:
80  case CCValAssign::LocInfo::AExt: {
81  auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
82  MIRBuilder.buildTrunc(ValVReg, Copy);
83  break;
84  }
85  }
86  }
87 
88  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
89  MachinePointerInfo &MPO, CCValAssign &VA) override {
90  // FIXME: Get alignment
91  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
93  1);
94  MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
95  }
96 
97  /// How the physical register gets marked varies between formal
98  /// parameters (it's a basic-block live-in), and a call instruction
99  /// (it's an implicit-def of the BL).
100  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
101 
102  bool isIncomingArgumentHandler() const override { return true; }
103 
104  uint64_t StackUsed;
105 };
106 
107 struct FormalArgHandler : public IncomingArgHandler {
108  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
109  CCAssignFn *AssignFn)
110  : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}
111 
112  void markPhysRegUsed(unsigned PhysReg) override {
113  MIRBuilder.getMRI()->addLiveIn(PhysReg);
114  MIRBuilder.getMBB().addLiveIn(PhysReg);
115  }
116 };
117 
118 struct CallReturnHandler : public IncomingArgHandler {
119  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
120  MachineInstrBuilder MIB, CCAssignFn *AssignFn)
121  : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
122 
123  void markPhysRegUsed(unsigned PhysReg) override {
124  MIB.addDef(PhysReg, RegState::Implicit);
125  }
126 
128 };
129 
130 struct OutgoingArgHandler : public CallLowering::ValueHandler {
131  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
132  MachineInstrBuilder MIB, CCAssignFn *AssignFn,
133  CCAssignFn *AssignFnVarArg)
134  : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
135  AssignFnVarArg(AssignFnVarArg), StackSize(0) {}
136 
137  Register getStackAddress(uint64_t Size, int64_t Offset,
138  MachinePointerInfo &MPO) override {
139  LLT p0 = LLT::pointer(0, 64);
140  LLT s64 = LLT::scalar(64);
141  Register SPReg = MRI.createGenericVirtualRegister(p0);
142  MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));
143 
144  Register OffsetReg = MRI.createGenericVirtualRegister(s64);
145  MIRBuilder.buildConstant(OffsetReg, Offset);
146 
147  Register AddrReg = MRI.createGenericVirtualRegister(p0);
148  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
149 
150  MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
151  return AddrReg;
152  }
153 
154  void assignValueToReg(Register ValVReg, Register PhysReg,
155  CCValAssign &VA) override {
156  MIB.addUse(PhysReg, RegState::Implicit);
157  Register ExtReg = extendRegister(ValVReg, VA);
158  MIRBuilder.buildCopy(PhysReg, ExtReg);
159  }
160 
161  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
162  MachinePointerInfo &MPO, CCValAssign &VA) override {
163  if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
164  Size = VA.getLocVT().getSizeInBits() / 8;
165  ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(Size * 8), ValVReg)
166  ->getOperand(0)
167  .getReg();
168  }
169  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
170  MPO, MachineMemOperand::MOStore, Size, 1);
171  MIRBuilder.buildStore(ValVReg, Addr, *MMO);
172  }
173 
174  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
175  CCValAssign::LocInfo LocInfo,
177  CCState &State) override {
178  bool Res;
179  if (Info.IsFixed)
180  Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
181  else
182  Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
183 
184  StackSize = State.getNextStackOffset();
185  return Res;
186  }
187 
189  CCAssignFn *AssignFnVarArg;
190  uint64_t StackSize;
191 };
192 } // namespace
193 
194 void AArch64CallLowering::splitToValueTypes(
195  const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
196  const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv) const {
197  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
198  LLVMContext &Ctx = OrigArg.Ty->getContext();
199 
200  if (OrigArg.Ty->isVoidTy())
201  return;
202 
203  SmallVector<EVT, 4> SplitVTs;
205  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
206 
207  if (SplitVTs.size() == 1) {
208  // No splitting to do, but we want to replace the original type (e.g. [1 x
209  // double] -> double).
210  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
211  OrigArg.Flags, OrigArg.IsFixed);
212  return;
213  }
214 
215  // Create one ArgInfo for each virtual register in the original ArgInfo.
216  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
217 
218  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
219  OrigArg.Ty, CallConv, false);
220  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
221  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
222  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
223  OrigArg.IsFixed);
224  if (NeedsRegBlock)
225  SplitArgs.back().Flags.setInConsecutiveRegs();
226  }
227 
228  SplitArgs.back().Flags.setInConsecutiveRegsLast();
229 }
230 
232  const Value *Val,
233  ArrayRef<Register> VRegs,
234  Register SwiftErrorVReg) const {
235  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
236  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
237  "Return value without a vreg");
238 
239  bool Success = true;
240  if (!VRegs.empty()) {
241  MachineFunction &MF = MIRBuilder.getMF();
242  const Function &F = MF.getFunction();
243 
244  MachineRegisterInfo &MRI = MF.getRegInfo();
245  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
246  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
247  auto &DL = F.getParent()->getDataLayout();
248  LLVMContext &Ctx = Val->getType()->getContext();
249 
250  SmallVector<EVT, 4> SplitEVTs;
251  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
252  assert(VRegs.size() == SplitEVTs.size() &&
253  "For each split Type there should be exactly one VReg.");
254 
255  SmallVector<ArgInfo, 8> SplitArgs;
257 
258  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
259  if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
260  LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
261  return false;
262  }
263 
264  Register CurVReg = VRegs[i];
265  ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
266  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
267 
268  // i1 is a special case because SDAG i1 true is naturally zero extended
269  // when widened using ANYEXT. We need to do it explicitly here.
270  if (MRI.getType(CurVReg).getSizeInBits() == 1) {
271  CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
272  } else {
273  // Some types will need extending as specified by the CC.
274  MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
275  if (EVT(NewVT) != SplitEVTs[i]) {
276  unsigned ExtendOp = TargetOpcode::G_ANYEXT;
278  Attribute::SExt))
279  ExtendOp = TargetOpcode::G_SEXT;
281  Attribute::ZExt))
282  ExtendOp = TargetOpcode::G_ZEXT;
283 
284  LLT NewLLT(NewVT);
285  LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
286  CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
287  // Instead of an extend, we might have a vector type which needs
288  // padding with more elements, e.g. <2 x half> -> <4 x half>.
289  if (NewVT.isVector()) {
290  if (OldLLT.isVector()) {
291  if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
292  // We don't handle VA types which are not exactly twice the
293  // size, but can easily be done in future.
294  if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
295  LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
296  return false;
297  }
298  auto Undef = MIRBuilder.buildUndef({OldLLT});
299  CurVReg =
300  MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef.getReg(0)})
301  .getReg(0);
302  } else {
303  // Just do a vector extend.
304  CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
305  .getReg(0);
306  }
307  } else if (NewLLT.getNumElements() == 2) {
308  // We need to pad a <1 x S> type to <2 x S>. Since we don't have
309  // <1 x S> vector types in GISel we use a build_vector instead
310  // of a vector merge/concat.
311  auto Undef = MIRBuilder.buildUndef({OldLLT});
312  CurVReg =
313  MIRBuilder
314  .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
315  .getReg(0);
316  } else {
317  LLVM_DEBUG(dbgs() << "Could not handle ret ty");
318  return false;
319  }
320  } else {
321  // A scalar extend.
322  CurVReg =
323  MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
324  }
325  }
326  }
327  if (CurVReg != CurArgInfo.Regs[0]) {
328  CurArgInfo.Regs[0] = CurVReg;
329  // Reset the arg flags after modifying CurVReg.
330  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
331  }
332  splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
333  }
334 
335  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
336  Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
337  }
338 
339  if (SwiftErrorVReg) {
340  MIB.addUse(AArch64::X21, RegState::Implicit);
341  MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
342  }
343 
344  MIRBuilder.insertInstr(MIB);
345  return Success;
346 }
347 
349  MachineIRBuilder &MIRBuilder, const Function &F,
350  ArrayRef<ArrayRef<Register>> VRegs) const {
351  MachineFunction &MF = MIRBuilder.getMF();
352  MachineBasicBlock &MBB = MIRBuilder.getMBB();
353  MachineRegisterInfo &MRI = MF.getRegInfo();
354  auto &DL = F.getParent()->getDataLayout();
355 
356  SmallVector<ArgInfo, 8> SplitArgs;
357  unsigned i = 0;
358  for (auto &Arg : F.args()) {
359  if (DL.getTypeStoreSize(Arg.getType()) == 0)
360  continue;
361 
362  ArgInfo OrigArg{VRegs[i], Arg.getType()};
363  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
364 
365  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
366  ++i;
367  }
368 
369  if (!MBB.empty())
370  MIRBuilder.setInstr(*MBB.begin());
371 
372  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
373  CCAssignFn *AssignFn =
374  TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
375 
376  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
377  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
378  return false;
379 
380  if (F.isVarArg()) {
382  // FIXME: we need to reimplement saveVarArgsRegisters from
383  // AArch64ISelLowering.
384  return false;
385  }
386 
387  // We currently pass all varargs at 8-byte alignment.
388  uint64_t StackOffset = alignTo(Handler.StackUsed, 8);
389 
390  auto &MFI = MIRBuilder.getMF().getFrameInfo();
392  FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
393  }
394 
395  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
396  if (Subtarget.hasCustomCallingConv())
398 
399  // Move back to the end of the basic block.
400  MIRBuilder.setMBB(MBB);
401 
402  return true;
403 }
404 
406  CallLoweringInfo &Info) const {
407  MachineFunction &MF = MIRBuilder.getMF();
408  const Function &F = MF.getFunction();
409  MachineRegisterInfo &MRI = MF.getRegInfo();
410  auto &DL = F.getParent()->getDataLayout();
411 
412  SmallVector<ArgInfo, 8> SplitArgs;
413  for (auto &OrigArg : Info.OrigArgs) {
414  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, Info.CallConv);
415  // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
416  if (OrigArg.Ty->isIntegerTy(1))
417  SplitArgs.back().Flags.setZExt();
418  }
419 
420  // Find out which ABI gets to decide where things go.
421  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
422  CCAssignFn *AssignFnFixed =
423  TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/false);
424  CCAssignFn *AssignFnVarArg =
425  TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/true);
426 
427  auto CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
428 
429  // Create a temporarily-floating call instruction so we can add the implicit
430  // uses of arg registers.
431  auto MIB = MIRBuilder.buildInstrNoInsert(Info.Callee.isReg() ? AArch64::BLR
432  : AArch64::BL);
433  MIB.add(Info.Callee);
434 
435  // Tell the call which registers are clobbered.
436  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
437  const uint32_t *Mask = TRI->getCallPreservedMask(MF, F.getCallingConv());
439  TRI->UpdateCustomCallPreservedMask(MF, &Mask);
440  MIB.addRegMask(Mask);
441 
442  if (TRI->isAnyArgRegReserved(MF))
443  TRI->emitReservedArgRegCallError(MF);
444 
445  // Do the actual argument marshalling.
446  SmallVector<unsigned, 8> PhysRegs;
447  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
448  AssignFnVarArg);
449  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
450  return false;
451 
452  // Now we can add the actual call instruction to the correct basic block.
453  MIRBuilder.insertInstr(MIB);
454 
455  // If Callee is a reg, since it is used by a target specific
456  // instruction, it must have a register class matching the
457  // constraint of that instruction.
458  if (Info.Callee.isReg())
460  MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
461  *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
462  0));
463 
464  // Finally we can copy the returned value back into its virtual-register. In
465  // symmetry with the arugments, the physical register must be an
466  // implicit-define of the call instruction.
467  CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
468  if (!Info.OrigRet.Ty->isVoidTy()) {
469  SplitArgs.clear();
470 
471  splitToValueTypes(Info.OrigRet, SplitArgs, DL, MRI, F.getCallingConv());
472 
473  CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
474  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
475  return false;
476  }
477 
478  if (Info.SwiftErrorVReg) {
479  MIB.addDef(AArch64::X21, RegState::Implicit);
480  MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
481  }
482 
483  CallSeqStart.addImm(Handler.StackSize).addImm(0);
484  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
485  .addImm(Handler.StackSize)
486  .addImm(0);
487 
488  return true;
489 }
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:176
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
const MachineInstrBuilder & add(const MachineOperand &MO) const
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
MachineOperand Callee
Destination of the call.
Definition: CallLowering.h:70
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
StackOffset is a wrapper around scalable and non-scalable offsets and is used in several functions su...
unsigned constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, const MachineOperand &RegMO, unsigned OpIdx)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition: Utils.cpp:40
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool hasCustomCallingConv() const
Register getReg(unsigned Idx) const
Get the register for the operand index.
bool isVector() const
Return true if this is a vector value type.
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1100
LLT getType(unsigned Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register...
virtual const RegisterBankInfo * getRegBankInfo() const
If the information for the register banks is available, return it.
unsigned const TargetRegisterInfo * TRI
F(f)
Register SwiftErrorVReg
Valid if the call has a swifterror inout parameter, and contains the vreg that the swifterror should ...
Definition: CallLowering.h:80
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register >> VRegs) const override
This hook must be implemented to lower the incoming (formal) arguments, described by VRegs...
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
bool isVector() const
bool handleAssignments(MachineIRBuilder &MIRBuilder, ArrayRef< ArgInfo > Args, ValueHandler &Handler) const
Invoke Handler::assignArg on each of the given Args and then use Callback to move them to the assigne...
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:369
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:261
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:196
This file contains the simple types necessary to represent the attributes associated with functions a...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
LocInfo getLocInfo() const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
unsigned getSizeInBits() const
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
const AArch64RegisterInfo * getRegisterInfo() const override
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:223
void setReg(Register Reg)
Change the register this operand corresponds to.
virtual const TargetInstrInfo * getInstrInfo() const
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:119
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineRegisterInfo * getMRI()
Getter for MRI.
unsigned const MachineRegisterInfo * MRI
Machine Value Type.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:148
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
Helper class to build MachineInstr.
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, Register SwiftErrorVReg) const override
This hook must be implemented to lower outgoing return values, described by Val, into the specified v...
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:442
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
MachineInstrBuilder buildGEP(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_GEP Op0, Op1.
Extended Value Type.
Definition: ValueTypes.h:33
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override
This hook must be implemented to lower the given call instruction, including argument and return valu...
size_t size() const
Definition: SmallVector.h:52
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:91
This class contains a discriminated union of information about pointers in memory operands...
The memory access writes data.
SmallVector< ArgInfo, 8 > OrigArgs
List of descriptors of the arguments passed to the function.
Definition: CallLowering.h:76
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
CCState - This class holds information needed while lowering arguments and return values...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:212
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
CCValAssign - Represent assignment of one arg/retval to a location.
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Promote Memory to Register
Definition: Mem2Reg.cpp:109
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const Function & getFunction() const
Return the LLVM function that this machine code represents.
This file declares the MachineIRBuilder class.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
#define Success
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:126
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
CallingConv::ID CallConv
Calling convention to be used for the call.
Definition: CallLowering.h:66
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
void setMBB(MachineBasicBlock &MBB)
Set the insertion point to the end of MBB.
The memory access always returns the same value (or traps).
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
uint32_t Size
Definition: Profile.cpp:46
This file describes how to lower LLVM calls to machine code calls.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:575
LLVM Value Representation.
Definition: Value.h:73
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type...
Definition: DataLayout.h:445
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
AArch64CallLowering(const AArch64TargetLowering &TLI)
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:47
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Register getReg() const
getReg - Returns the register number.
#define LLVM_DEBUG(X)
Definition: Debug.h:122
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:416
ArgInfo OrigRet
Descriptor for the return type of the function.
Definition: CallLowering.h:73
iterator_range< arg_iterator > args()
Definition: Function.h:719
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:143