LLVM 9.0.0svn
ARMCallLowering.cpp
1 //===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "ARMCallLowering.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMISelLowering.h"
18 #include "ARMSubtarget.h"
19 #include "Utils/ARMBaseInfo.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/CodeGen/Analysis.h"
22 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
24 #include "llvm/CodeGen/GlobalISel/Utils.h"
25 #include "llvm/CodeGen/LowLevelType.h"
26 #include "llvm/CodeGen/MachineBasicBlock.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstrBuilder.h"
30 #include "llvm/CodeGen/MachineMemOperand.h"
31 #include "llvm/CodeGen/MachineOperand.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/TargetRegisterInfo.h"
34 #include "llvm/CodeGen/TargetSubtargetInfo.h"
35 #include "llvm/CodeGen/ValueTypes.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/DerivedTypes.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/Type.h"
41 #include "llvm/IR/Value.h"
42 #include "llvm/Support/Casting.h"
43 #include "llvm/Support/LowLevelTypeImpl.h"
44 #include "llvm/Support/MachineValueType.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstdint>
48 #include <utility>
49 
50 using namespace llvm;
51 
52 ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
53  : CallLowering(&TLI) {}
54 
55 static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
56  Type *T) {
57  if (T->isArrayTy())
58  return isSupportedType(DL, TLI, T->getArrayElementType());
59 
60  if (T->isStructTy()) {
61  // For now we only allow homogeneous structs that we can manipulate with
62  // G_MERGE_VALUES and G_UNMERGE_VALUES
63  auto StructT = cast<StructType>(T);
64  for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
65  if (StructT->getElementType(i) != StructT->getElementType(0))
66  return false;
67  return isSupportedType(DL, TLI, StructT->getElementType(0));
68  }
69 
70  EVT VT = TLI.getValueType(DL, T, true);
71  if (!VT.isSimple() || VT.isVector() ||
72  !(VT.isInteger() || VT.isFloatingPoint()))
73  return false;
74 
75  unsigned VTSize = VT.getSimpleVT().getSizeInBits();
76 
77  if (VTSize == 64)
78  // FIXME: Support i64 too
79  return VT.isFloatingPoint();
80 
81  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
82 }
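// Illustrative summary of the checks above: scalar i1/i8/i16/i32, f32 and f64
// values are accepted, as are arrays and homogeneous structs of those (e.g. a
// struct of two floats); i64, vector types and mixed structs such as
// {i32, float} are rejected, which makes GlobalISel fall back to SelectionDAG
// for the surrounding function.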
83 
84 namespace {
85 
86 /// Helper class for values going out through an ABI boundary (used for handling
87 /// function return values and call parameters).
88 struct OutgoingValueHandler : public CallLowering::ValueHandler {
89  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
90  MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
91  : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
92 
93  unsigned getStackAddress(uint64_t Size, int64_t Offset,
94  MachinePointerInfo &MPO) override {
95  assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
96  "Unsupported size");
97 
98  LLT p0 = LLT::pointer(0, 32);
99  LLT s32 = LLT::scalar(32);
100  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
101  MIRBuilder.buildCopy(SPReg, ARM::SP);
102 
103  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
104  MIRBuilder.buildConstant(OffsetReg, Offset);
105 
106  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
107  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
108 
109  MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
110  return AddrReg;
111  }
112 
113  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
114  CCValAssign &VA) override {
115  assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
116  assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
117 
118  assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
119  assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
120 
121  unsigned ExtReg = extendRegister(ValVReg, VA);
122  MIRBuilder.buildCopy(PhysReg, ExtReg);
123  MIB.addUse(PhysReg, RegState::Implicit);
124  }
125 
126  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
127  MachinePointerInfo &MPO, CCValAssign &VA) override {
128  assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
129  "Unsupported size");
130 
131  unsigned ExtReg = extendRegister(ValVReg, VA);
132  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
134  /* Alignment */ 1);
135  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
136  }
137 
138  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
139  ArrayRef<CCValAssign> VAs) override {
140  CCValAssign VA = VAs[0];
141  assert(VA.needsCustom() && "Value doesn't need custom handling");
142  assert(VA.getValVT() == MVT::f64 && "Unsupported type");
143 
144  CCValAssign NextVA = VAs[1];
145  assert(NextVA.needsCustom() && "Value doesn't need custom handling");
146  assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
147 
148  assert(VA.getValNo() == NextVA.getValNo() &&
149  "Values belong to different arguments");
150 
151  assert(VA.isRegLoc() && "Value should be in reg");
152  assert(NextVA.isRegLoc() && "Value should be in reg");
153 
154  unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
155  MRI.createGenericVirtualRegister(LLT::scalar(32))};
156 
157  MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);
157 
158  bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
159  if (!IsLittle)
160  std::swap(NewRegs[0], NewRegs[1]);
161 
162  assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
163  assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
164 
165  return 1;
166  }
167 
168  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
169  CCValAssign::LocInfo LocInfo,
170  const CallLowering::ArgInfo &Info, CCState &State) override {
171  if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
172  return true;
173 
174  StackSize =
175  std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
176  return false;
177  }
178 
179  MachineInstrBuilder &MIB;
180  uint64_t StackSize = 0;
181 };
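// Rough sketch of the custom f64 handling above for a little-endian AAPCS
// (soft-float register passing) target, with register assignments shown only
// for illustration:
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val(s64)
//   $r0 = COPY %lo(s32)
//   $r1 = COPY %hi(s32)
// On big-endian targets the two halves are swapped before being assigned.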
182 
183 } // end anonymous namespace
184 
185 void ARMCallLowering::splitToValueTypes(
186  const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
187  MachineFunction &MF, const SplitArgTy &PerformArgSplit) const {
188  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
189  LLVMContext &Ctx = OrigArg.Ty->getContext();
190  const DataLayout &DL = MF.getDataLayout();
191  MachineRegisterInfo &MRI = MF.getRegInfo();
192  const Function &F = MF.getFunction();
193 
194  SmallVector<EVT, 4> SplitVTs;
195  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, nullptr, nullptr, 0);
196 
197  if (SplitVTs.size() == 1) {
198  // Even if there is no splitting to do, we still want to replace the
199  // original type (e.g. pointer type -> integer).
200  auto Flags = OrigArg.Flags;
201  unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
202  Flags.setOrigAlign(OriginalAlignment);
203  SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx), Flags,
204  OrigArg.IsFixed);
205  return;
206  }
207 
208  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
209  EVT SplitVT = SplitVTs[i];
210  Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
211  auto Flags = OrigArg.Flags;
212 
213  unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
214  Flags.setOrigAlign(OriginalAlignment);
215 
216  bool NeedsConsecutiveRegisters =
217  TLI.functionArgumentNeedsConsecutiveRegisters(
218  SplitTy, F.getCallingConv(), F.isVarArg());
219  if (NeedsConsecutiveRegisters) {
220  Flags.setInConsecutiveRegs();
221  if (i == e - 1)
222  Flags.setInConsecutiveRegsLast();
223  }
224 
225  unsigned PartReg =
226  MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL));
227  SplitArgs.push_back(ArgInfo{PartReg, SplitTy, Flags, OrigArg.IsFixed});
228  PerformArgSplit(PartReg);
229  }
230 }
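// Example of the splitting above (illustrative only): an argument of LLVM IR
// type {i32, i32} produces two s32 ArgInfos, each with a freshly created
// virtual register, and PerformArgSplit is invoked once per part so callers
// can reconnect the pieces to the original value, typically with
// G_UNMERGE_VALUES for outgoing values or G_MERGE_VALUES for incoming ones.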
231 
232 /// Lower the return value for the already existing \p Ret. This assumes that
233 /// \p MIRBuilder's insertion point is correct.
234 bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
235  const Value *Val, ArrayRef<unsigned> VRegs,
236  MachineInstrBuilder &Ret) const {
237  if (!Val)
238  // Nothing to do here.
239  return true;
240 
241  auto &MF = MIRBuilder.getMF();
242  const auto &F = MF.getFunction();
243 
244  auto DL = MF.getDataLayout();
245  auto &TLI = *getTLI<ARMTargetLowering>();
246  if (!isSupportedType(DL, TLI, Val->getType()))
247  return false;
248 
249  SmallVector<EVT, 4> SplitEVTs;
250  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
251  assert(VRegs.size() == SplitEVTs.size() &&
252  "For each split Type there should be exactly one VReg.");
253 
254  SmallVector<ArgInfo, 4> SplitVTs;
255  LLVMContext &Ctx = Val->getType()->getContext();
256  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
257  ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx));
258  setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
259 
260  SmallVector<unsigned, 4> Regs;
261  splitToValueTypes(CurArgInfo, SplitVTs, MF,
262  [&](unsigned Reg) { Regs.push_back(Reg); });
263  if (Regs.size() > 1)
264  MIRBuilder.buildUnmerge(Regs, VRegs[i]);
265  }
266 
267  CCAssignFn *AssignFn =
268  TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());
269 
270  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
271  return handleAssignments(MIRBuilder, SplitVTs, RetHandler);
272 }
273 
274 bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
275  const Value *Val,
276  ArrayRef<unsigned> VRegs) const {
277  assert(!Val == VRegs.empty() && "Return value without a vreg");
278 
279  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
280  unsigned Opcode = ST.getReturnOpcode();
281  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));
282 
283  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
284  return false;
285 
286  MIRBuilder.insertInstr(Ret);
287  return true;
288 }
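// As a sketch, lowering 'ret i32 %x' on an ARM-mode subtarget yields roughly:
//   $r0 = COPY %x(s32)
//   BX_RET 14, $noreg, implicit $r0
// where the return opcode comes from ARMSubtarget::getReturnOpcode() and the
// trailing 14, $noreg operands are the always-true predicate added above; the
// exact opcode and registers depend on the subtarget and calling convention.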
289 
290 namespace {
291 
292 /// Helper class for values coming in through an ABI boundary (used for handling
293 /// formal arguments and call return values).
294 struct IncomingValueHandler : public CallLowering::ValueHandler {
295  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
296  CCAssignFn AssignFn)
297  : ValueHandler(MIRBuilder, MRI, AssignFn) {}
298 
299  bool isArgumentHandler() const override { return true; }
300 
301  unsigned getStackAddress(uint64_t Size, int64_t Offset,
302  MachinePointerInfo &MPO) override {
303  assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
304  "Unsupported size");
305 
306  auto &MFI = MIRBuilder.getMF().getFrameInfo();
307 
308  int FI = MFI.CreateFixedObject(Size, Offset, true);
309  MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
310 
311  unsigned AddrReg =
312  MRI.createGenericVirtualRegister(LLT::pointer(MPO.getAddrSpace(), 32));
313  MIRBuilder.buildFrameIndex(AddrReg, FI);
314 
315  return AddrReg;
316  }
317 
318  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
319  MachinePointerInfo &MPO, CCValAssign &VA) override {
320  assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
321  "Unsupported size");
322 
323  if (VA.getLocInfo() == CCValAssign::SExt ||
324  VA.getLocInfo() == CCValAssign::ZExt) {
325  // If the value is zero- or sign-extended, its size becomes 4 bytes, so
326  // that's what we should load.
327  Size = 4;
328  assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");
329 
330  auto LoadVReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
331  buildLoad(LoadVReg, Addr, Size, /* Alignment */ 1, MPO);
332  MIRBuilder.buildTrunc(ValVReg, LoadVReg);
333  } else {
334  // If the value is not extended, a simple load will suffice.
335  buildLoad(ValVReg, Addr, Size, /* Alignment */ 1, MPO);
336  }
337  }
338 
339  void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
340  MachinePointerInfo &MPO) {
341  auto MMO = MIRBuilder.getMF().getMachineMemOperand(
342  MPO, MachineMemOperand::MOLoad, Size, Alignment);
343  MIRBuilder.buildLoad(Val, Addr, *MMO);
344  }
345 
346  void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
347  CCValAssign &VA) override {
348  assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
349  assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
350 
351  auto ValSize = VA.getValVT().getSizeInBits();
352  auto LocSize = VA.getLocVT().getSizeInBits();
353 
354  assert(ValSize <= 64 && "Unsupported value size");
355  assert(LocSize <= 64 && "Unsupported location size");
356 
357  markPhysRegUsed(PhysReg);
358  if (ValSize == LocSize) {
359  MIRBuilder.buildCopy(ValVReg, PhysReg);
360  } else {
361  assert(ValSize < LocSize && "Extensions not supported");
362 
363  // We cannot create a truncating copy, nor a trunc of a physical register.
364  // Therefore, we need to copy the content of the physical register into a
365  // virtual one and then truncate that.
366  auto PhysRegToVReg =
367  MRI.createGenericVirtualRegister(LLT::scalar(LocSize));
368  MIRBuilder.buildCopy(PhysRegToVReg, PhysReg);
369  MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
370  }
371  }
372 
373  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
374  ArrayRef<CCValAssign> VAs) override {
375  CCValAssign VA = VAs[0];
376  assert(VA.needsCustom() && "Value doesn't need custom handling");
377  assert(VA.getValVT() == MVT::f64 && "Unsupported type");
378 
379  CCValAssign NextVA = VAs[1];
380  assert(NextVA.needsCustom() && "Value doesn't need custom handling");
381  assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
382 
383  assert(VA.getValNo() == NextVA.getValNo() &&
384  "Values belong to different arguments");
385 
386  assert(VA.isRegLoc() && "Value should be in reg");
387  assert(NextVA.isRegLoc() && "Value should be in reg");
388 
389  unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
390  MRI.createGenericVirtualRegister(LLT::scalar(32))};
391 
392  assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
393  assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
394 
395  bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
396  if (!IsLittle)
397  std::swap(NewRegs[0], NewRegs[1]);
398 
399  MIRBuilder.buildMerge(Arg.Reg, NewRegs);
400 
401  return 1;
402  }
403 
404  /// Marking a physical register as used is different between formal
405  /// parameters, where it's a basic block live-in, and call returns, where it's
406  /// an implicit-def of the call instruction.
407  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
408 };
409 
410 struct FormalArgHandler : public IncomingValueHandler {
411  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
412  CCAssignFn AssignFn)
413  : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}
414 
415  void markPhysRegUsed(unsigned PhysReg) override {
416  MIRBuilder.getMBB().addLiveIn(PhysReg);
417  }
418 };
419 
420 } // end anonymous namespace
421 
422 bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
423  const Function &F,
424  ArrayRef<unsigned> VRegs) const {
425  auto &TLI = *getTLI<ARMTargetLowering>();
426  auto Subtarget = TLI.getSubtarget();
427 
428  if (Subtarget->isThumb1Only())
429  return false;
430 
431  // Quick exit if there aren't any args
432  if (F.arg_empty())
433  return true;
434 
435  if (F.isVarArg())
436  return false;
437 
438  auto &MF = MIRBuilder.getMF();
439  auto &MBB = MIRBuilder.getMBB();
440  auto DL = MF.getDataLayout();
441 
442  for (auto &Arg : F.args()) {
443  if (!isSupportedType(DL, TLI, Arg.getType()))
444  return false;
445  if (Arg.hasByValOrInAllocaAttr())
446  return false;
447  }
448 
449  CCAssignFn *AssignFn =
450  TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());
451 
452  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
453  AssignFn);
454 
455  SmallVector<ArgInfo, 8> ArgInfos;
456  SmallVector<unsigned, 4> SplitRegs;
457  unsigned Idx = 0;
458  for (auto &Arg : F.args()) {
459  ArgInfo AInfo(VRegs[Idx], Arg.getType());
460  setArgFlags(AInfo, Idx + AttributeList::FirstArgIndex, DL, F);
461 
462  SplitRegs.clear();
463 
464  splitToValueTypes(AInfo, ArgInfos, MF,
465  [&](unsigned Reg) { SplitRegs.push_back(Reg); });
466 
467  if (!SplitRegs.empty())
468  MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);
469 
470  Idx++;
471  }
472 
473  if (!MBB.empty())
474  MIRBuilder.setInstr(*MBB.begin());
475 
476  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
477  return false;
478 
479  // Move back to the end of the basic block.
480  MIRBuilder.setMBB(MBB);
481  return true;
482 }
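// For illustration, lowering the formal arguments of 'define i32 @f(i32 %a,
// i32 %b)' marks $r0 and $r1 as live-ins of the entry block and emits roughly:
//   %0:_(s32) = COPY $r0
//   %1:_(s32) = COPY $r1
// Arguments that the calling convention assigns to the stack are instead
// loaded from fixed frame indices created in getStackAddress above.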
483 
484 namespace {
485 
486 struct CallReturnHandler : public IncomingValueHandler {
487  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
488  MachineInstrBuilder MIB, CCAssignFn *AssignFn)
489  : IncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
490 
491  void markPhysRegUsed(unsigned PhysReg) override {
492  MIB.addDef(PhysReg, RegState::Implicit);
493  }
494 
495  MachineInstrBuilder MIB;
496 };
497 
498 // FIXME: This should move to the ARMSubtarget when it supports all the opcodes.
499 unsigned getCallOpcode(const ARMSubtarget &STI, bool isDirect) {
500  if (isDirect)
501  return STI.isThumb() ? ARM::tBL : ARM::BL;
502 
503  if (STI.isThumb())
504  return ARM::tBLXr;
505 
506  if (STI.hasV5TOps())
507  return ARM::BLX;
508 
509  if (STI.hasV4TOps())
510  return ARM::BX_CALL;
511 
512  return ARM::BMOVPCRX_CALL;
513 }
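// For example, a direct call is tBL in Thumb mode and BL in ARM mode, while an
// indirect call uses tBLXr or BLX where available and falls back to BX_CALL on
// ARMv4T and BMOVPCRX_CALL on earlier cores, mirroring the checks above.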
514 } // end anonymous namespace
515 
516 bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
517  CallingConv::ID CallConv,
518  const MachineOperand &Callee,
519  const ArgInfo &OrigRet,
520  ArrayRef<ArgInfo> OrigArgs) const {
521  MachineFunction &MF = MIRBuilder.getMF();
522  const auto &TLI = *getTLI<ARMTargetLowering>();
523  const auto &DL = MF.getDataLayout();
524  const auto &STI = MF.getSubtarget<ARMSubtarget>();
525  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
526  MachineRegisterInfo &MRI = MF.getRegInfo();
527 
528  if (STI.genLongCalls())
529  return false;
530 
531  if (STI.isThumb1Only())
532  return false;
533 
534  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);
535 
536  // Create the call instruction so we can add the implicit uses of arg
537  // registers, but don't insert it yet.
538  bool IsDirect = !Callee.isReg();
539  auto CallOpcode = getCallOpcode(STI, IsDirect);
540  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);
541 
542  bool IsThumb = STI.isThumb();
543  if (IsThumb)
544  MIB.add(predOps(ARMCC::AL));
545 
546  MIB.add(Callee);
547  if (!IsDirect) {
548  auto CalleeReg = Callee.getReg();
549  if (CalleeReg && !TRI->isPhysicalRegister(CalleeReg)) {
550  unsigned CalleeIdx = IsThumb ? 2 : 0;
551  MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
552  MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
553  *MIB.getInstr(), MIB->getDesc(), Callee, CalleeIdx));
554  }
555  }
556 
557  MIB.addRegMask(TRI->getCallPreservedMask(MF, CallConv));
558 
559  bool IsVarArg = false;
560  SmallVector<ArgInfo, 8> ArgInfos;
561  for (auto Arg : OrigArgs) {
562  if (!isSupportedType(DL, TLI, Arg.Ty))
563  return false;
564 
565  if (!Arg.IsFixed)
566  IsVarArg = true;
567 
568  if (Arg.Flags.isByVal())
569  return false;
570 
571  SmallVector<unsigned, 8> Regs;
572  splitToValueTypes(Arg, ArgInfos, MF,
573  [&](unsigned Reg) { Regs.push_back(Reg); });
574 
575  if (Regs.size() > 1)
576  MIRBuilder.buildUnmerge(Regs, Arg.Reg);
577  }
578 
579  auto ArgAssignFn = TLI.CCAssignFnForCall(CallConv, IsVarArg);
580  OutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
581  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
582  return false;
583 
584  // Now we can add the actual call instruction to the correct basic block.
585  MIRBuilder.insertInstr(MIB);
586 
587  if (!OrigRet.Ty->isVoidTy()) {
588  if (!isSupportedType(DL, TLI, OrigRet.Ty))
589  return false;
590 
591  ArgInfos.clear();
592  SmallVector<unsigned, 8> SplitRegs;
593  splitToValueTypes(OrigRet, ArgInfos, MF,
594  [&](unsigned Reg) { SplitRegs.push_back(Reg); });
595 
596  auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
597  CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
598  if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
599  return false;
600 
601  if (!SplitRegs.empty()) {
602  // We have split the value and allocated each individual piece, now build
603  // it up again.
604  MIRBuilder.buildMerge(OrigRet.Reg, SplitRegs);
605  }
606  }
607 
608  // We now know the size of the stack - update the ADJCALLSTACKDOWN
609  // accordingly.
610  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));
611 
612  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
613  .addImm(ArgHandler.StackSize)
614  .addImm(0)
615  .add(predOps(ARMCC::AL));
616 
617  return true;
618 }
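// Putting it together, a direct call such as 'call i32 @g(i32 %x)' is lowered
// to a sequence roughly of this shape (a sketch for an ARM-mode AAPCS target,
// with the call's register mask elided):
//   ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
//   $r0 = COPY %x(s32)
//   BL @g, implicit $r0, implicit-def $r0
//   %y:_(s32) = COPY $r0
//   ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
// The stack-size immediates are taken from OutgoingValueHandler::StackSize.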