LLVM 10.0.0svn
MipsCallLowering.cpp
//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  // Assign each argument in turn; values that the calling convention splits
  // across several registers are handled by handleSplit().
  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 &&
           "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(Register Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  Register SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

  Register OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  Register AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO : handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

static bool isSupportedArgumentType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static bool isSupportedReturnType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  if (T->isAggregateType())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT.getSizeInBits() >= RegisterVT.getSizeInBits() does not mean information
  // is lost because RegisterVT cannot hold VT; it means that VT is split into
  // multiple registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedReturnType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    ArgInfo ArgRetInfo(VRegs, Val->getType());
    setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
    splitToValueTypes(DL, ArgRetInfo, 0, RetInfos, OrigArgIndices);

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    ArgInfos.push_back(AInfo);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  if (F.isVarArg()) {
    ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);

    int VaArgOffset;
    unsigned RegSize = 4;
    if (ArgRegs.size() == Idx)
      VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
    else {
      VaArgOffset =
          (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
          (int)(RegSize * (ArgRegs.size() - Idx));
    }

    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
    MF.getInfo<MipsFunctionInfo>()->setVarArgsFrameIndex(FI);

    for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) {
      MIRBuilder.getMBB().addLiveIn(ArgRegs[I]);

      MachineInstrBuilder Copy =
          MIRBuilder.buildCopy(LLT::scalar(RegSize * 8), Register(ArgRegs[I]));
      FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
      MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
      MachineInstrBuilder FrameIndex =
          MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
                                  /* Alignment */ RegSize);
      MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
    }
  }

  return true;
}

bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallLoweringInfo &Info) const {

  if (Info.CallConv != CallingConv::C)
    return false;

  for (auto &Arg : Info.OrigArgs) {
    if (!isSupportedArgumentType(Arg.Ty))
      return false;
    if (Arg.Flags[0].isByVal())
      return false;
    if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedReturnType(Info.OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Info.Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
    if (!Info.Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Info.Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    ArgInfos.push_back(Arg);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  bool IsCalleeVarArg = false;
  if (Info.Callee.isGlobal()) {
    const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
    IsCalleeVarArg = CF->isVarArg();
  }
  MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Register(Mips::GP),
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(DL, Info.OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

// Expand each ArgInfo into one ISD argument per register that the calling
// convention requires for the argument's type.
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(Align::None());

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  SmallVector<EVT, 4> SplitEVTs;
  SmallVector<Register, 4> SplitVRegs;
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitEVTs);

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo Info = ArgInfo{OrigArg.Regs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
    Info.Flags = OrigArg.Flags;
    SplitArgs.push_back(Info);
    SplitArgsOrigIndices.push_back(OriginalIndex);
  }
}