1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
15 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
16 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
17 #include "llvm/CodeGen/GlobalISel/Utils.h"
18 #include "llvm/CodeGen/MachineOperand.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/Target/TargetMachine.h"
26 
27 #define DEBUG_TYPE "call-lowering"
28 
29 using namespace llvm;
30 
31 void CallLowering::anchor() {}
32 
33 /// Helper function which updates \p Flags when \p AttrFn returns true.
34 static void
35 addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
36  const std::function<bool(Attribute::AttrKind)> &AttrFn) {
37  if (AttrFn(Attribute::SExt))
38  Flags.setSExt();
39  if (AttrFn(Attribute::ZExt))
40  Flags.setZExt();
41  if (AttrFn(Attribute::InReg))
42  Flags.setInReg();
43  if (AttrFn(Attribute::StructRet))
44  Flags.setSRet();
45  if (AttrFn(Attribute::Nest))
46  Flags.setNest();
47  if (AttrFn(Attribute::ByVal))
48  Flags.setByVal();
49  if (AttrFn(Attribute::Preallocated))
50  Flags.setPreallocated();
51  if (AttrFn(Attribute::InAlloca))
52  Flags.setInAlloca();
53  if (AttrFn(Attribute::Returned))
54  Flags.setReturned();
55  if (AttrFn(Attribute::SwiftSelf))
56  Flags.setSwiftSelf();
57  if (AttrFn(Attribute::SwiftAsync))
58  Flags.setSwiftAsync();
59  if (AttrFn(Attribute::SwiftError))
60  Flags.setSwiftError();
61 }
62 
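/// Compute the ISD argument flags implied by the parameter attributes of
/// argument \p ArgIdx on the call site \p Call.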
63 ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
64  unsigned ArgIdx) const {
65  ISD::ArgFlagsTy Flags;
66  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
67  return Call.paramHasAttr(ArgIdx, Attr);
68  });
69  return Flags;
70 }
71 
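/// Merge the flags implied by the attributes at index \p OpIdx in \p Attrs
/// into \p Flags.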
72 void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
73  const AttributeList &Attrs,
74  unsigned OpIdx) const {
75  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
76  return Attrs.hasAttributeAtIndex(OpIdx, Attr);
77  });
78 }
79 
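/// Build a CallLoweringInfo for the call site \p CB (callee, argument and
/// return descriptions, tail-call eligibility) and hand it to the target's
/// lowerCall implementation.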
80 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
81  ArrayRef<Register> ResRegs,
82  ArrayRef<ArrayRef<Register>> ArgRegs,
83  Register SwiftErrorVReg,
84  std::function<unsigned()> GetCalleeReg) const {
85  CallLoweringInfo Info;
86  const DataLayout &DL = MIRBuilder.getDataLayout();
87  MachineFunction &MF = MIRBuilder.getMF();
88  bool CanBeTailCalled = CB.isTailCall() &&
89  isInTailCallPosition(CB, MF.getTarget()) &&
90  (MF.getFunction()
91  .getFnAttribute("disable-tail-calls")
92  .getValueAsString() != "true");
93 
94  CallingConv::ID CallConv = CB.getCallingConv();
95  Type *RetTy = CB.getType();
96  bool IsVarArg = CB.getFunctionType()->isVarArg();
97 
98  SmallVector<BaseArgInfo, 4> SplitArgs;
99  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
100  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
101 
102  if (!Info.CanLowerReturn) {
103  // Callee requires sret demotion.
104  insertSRetOutgoingArgument(MIRBuilder, CB, Info);
105 
106  // The sret demotion isn't compatible with tail-calls, since the sret
107  // argument points into the caller's stack frame.
108  CanBeTailCalled = false;
109  }
110 
111  // First step is to marshall all the function's parameters into the correct
112  // physregs and memory locations. Gather the sequence of argument types that
113  // we'll pass to the assigner function.
114  unsigned i = 0;
115  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
116  for (auto &Arg : CB.args()) {
117  ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
118  i < NumFixedArgs};
119  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
120 
121  // If we have an explicit sret argument that is an Instruction (i.e., it
122  // might point to function-local memory), we can't meaningfully tail-call.
123  if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
124  CanBeTailCalled = false;
125 
126  Info.OrigArgs.push_back(OrigArg);
127  ++i;
128  }
129 
130  // Try looking through a bitcast from one function type to another.
131  // Commonly happens with calls to objc_msgSend().
132  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
133  if (const Function *F = dyn_cast<Function>(CalleeV))
134  Info.Callee = MachineOperand::CreateGA(F, 0);
135  else
136  Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
137 
138  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};
139  if (!Info.OrigRet.Ty->isVoidTy())
140  setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
141 
142  Info.CB = &CB;
143  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
144  Info.CallConv = CallConv;
145  Info.SwiftErrorVReg = SwiftErrorVReg;
146  Info.IsMustTailCall = CB.isMustTailCall();
147  Info.IsTailCall = CanBeTailCalled;
148  Info.IsVarArg = IsVarArg;
149  return lowerCall(MIRBuilder, Info);
150 }
151 
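/// Populate the flags of \p Arg (extension, sret, byval, alignment, ...) from
/// the attributes and types provided by \p FuncInfo at operand index \p OpIdx.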
152 template <typename FuncInfoTy>
153 void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
154  const DataLayout &DL,
155  const FuncInfoTy &FuncInfo) const {
156  auto &Flags = Arg.Flags[0];
157  const AttributeList &Attrs = FuncInfo.getAttributes();
158  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
159 
160  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
161  if (PtrTy) {
162  Flags.setPointer();
163  Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
164  }
165 
166  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
167  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
168  assert(OpIdx >= AttributeList::FirstArgIndex);
169  unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;
170 
171  Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
172  if (!ElementTy)
173  ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
174  if (!ElementTy)
175  ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
176  assert(ElementTy && "Must have byval, inalloca or preallocated type");
177  Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
178 
179  // For ByVal, alignment should be passed from the frontend. The backend
180  // will guess if this info is not there, but there are cases it cannot get right.
181  if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
182  MemAlign = *ParamAlign;
183  else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
184  MemAlign = *ParamAlign;
185  else
186  MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
187  } else if (OpIdx >= AttributeList::FirstArgIndex) {
188  if (auto ParamAlign =
189  FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
190  MemAlign = *ParamAlign;
191  }
192  Flags.setMemAlign(MemAlign);
193  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
194 
195  // Don't try to use the returned attribute if the argument is marked as
196  // swiftself, since it won't be passed in x0.
197  if (Flags.isSwiftSelf())
198  Flags.setReturned(false);
199 }
200 
201 template void
202 CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
203  const DataLayout &DL,
204  const Function &FuncInfo) const;
205 
206 template void
207 CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
208  const DataLayout &DL,
209  const CallBase &FuncInfo) const;
210 
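/// Break \p OrigArg into one or more pieces the calling convention can
/// process, returned in \p SplitArgs. If \p Offsets is non-null it receives
/// the byte offset of each piece.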
211 void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
212  SmallVectorImpl<ArgInfo> &SplitArgs,
213  const DataLayout &DL,
214  CallingConv::ID CallConv,
215  SmallVectorImpl<uint64_t> *Offsets) const {
216  LLVMContext &Ctx = OrigArg.Ty->getContext();
217 
218  SmallVector<EVT, 4> SplitVTs;
219  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);
220 
221  if (SplitVTs.size() == 0)
222  return;
223 
224  if (SplitVTs.size() == 1) {
225  // No splitting to do, but we want to replace the original type (e.g. [1 x
226  // double] -> double).
227  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
228  OrigArg.OrigArgIndex, OrigArg.Flags[0],
229  OrigArg.IsFixed, OrigArg.OrigValue);
230  return;
231  }
232 
233  // Create one ArgInfo for each virtual register in the original ArgInfo.
234  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
235 
236  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
237  OrigArg.Ty, CallConv, false, DL);
238  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
239  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
240  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
241  OrigArg.Flags[0], OrigArg.IsFixed);
242  if (NeedsRegBlock)
243  SplitArgs.back().Flags[0].setInConsecutiveRegs();
244  }
245 
246  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
247 }
248 
249 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
250 static MachineInstrBuilder
251 mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
252  ArrayRef<Register> SrcRegs) {
253  MachineRegisterInfo &MRI = *B.getMRI();
254  LLT LLTy = MRI.getType(DstRegs[0]);
255  LLT PartLLT = MRI.getType(SrcRegs[0]);
256 
257  // Deal with v3s16 split into v2s16
258  LLT LCMTy = getLCMType(LLTy, PartLLT);
259  if (LCMTy == LLTy) {
260  // Common case where no padding is needed.
261  assert(DstRegs.size() == 1);
262  return B.buildConcatVectors(DstRegs[0], SrcRegs);
263  }
264 
265  // We need to create an unmerge to the result registers, which may require
266  // widening the original value.
267  Register UnmergeSrcReg;
268  if (LCMTy != PartLLT) {
269  // e.g. A <3 x s16> value was split to <2 x s16>
270  // %register_value0:_(<2 x s16>)
271  // %register_value1:_(<2 x s16>)
272  // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
273  // %concat:_(<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
274  // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
275  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
276  Register Undef = B.buildUndef(PartLLT).getReg(0);
277 
278  // Build vector of undefs.
279  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);
280 
281  // Replace the first sources with the real registers.
282  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
283  UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
284  } else {
285  // We don't need to widen anything if we're extracting a scalar which was
286  // promoted to a vector e.g. s8 -> v4s8 -> s8
287  assert(SrcRegs.size() == 1);
288  UnmergeSrcReg = SrcRegs[0];
289  }
290 
291  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
292 
293  SmallVector<Register, 8> PadDstRegs(NumDst);
294  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
295 
296  // Create the excess dead defs for the unmerge.
297  for (int I = DstRegs.size(); I != NumDst; ++I)
298  PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
299 
300  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
301 }
302 
303 /// Create a sequence of instructions to combine pieces split into register
304 /// typed values to the original IR value. \p OrigRegs contains the destination
305 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
306 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
307 static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
308  ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
309  const ISD::ArgFlagsTy Flags) {
310  MachineRegisterInfo &MRI = *B.getMRI();
311 
312  if (PartLLT == LLTy) {
313  // We should have avoided introducing a new virtual register, and just
314  // directly assigned here.
315  assert(OrigRegs[0] == Regs[0]);
316  return;
317  }
318 
319  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
320  Regs.size() == 1) {
321  B.buildBitcast(OrigRegs[0], Regs[0]);
322  return;
323  }
324 
325  // A vector PartLLT needs extending to LLTy's element size.
326  // E.g. <2 x s64> = G_SEXT <2 x s32>.
327  if (PartLLT.isVector() == LLTy.isVector() &&
328  PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
329  (!PartLLT.isVector() ||
330  PartLLT.getNumElements() == LLTy.getNumElements()) &&
331  OrigRegs.size() == 1 && Regs.size() == 1) {
332  Register SrcReg = Regs[0];
333 
334  LLT LocTy = MRI.getType(SrcReg);
335 
336  if (Flags.isSExt()) {
337  SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
338  .getReg(0);
339  } else if (Flags.isZExt()) {
340  SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
341  .getReg(0);
342  }
343 
344  // Sometimes pointers are passed zero extended.
345  LLT OrigTy = MRI.getType(OrigRegs[0]);
346  if (OrigTy.isPointer()) {
347  LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
348  B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
349  return;
350  }
351 
352  B.buildTrunc(OrigRegs[0], SrcReg);
353  return;
354  }
355 
356  if (!LLTy.isVector() && !PartLLT.isVector()) {
357  assert(OrigRegs.size() == 1);
358  LLT OrigTy = MRI.getType(OrigRegs[0]);
359 
360  unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
361  if (SrcSize == OrigTy.getSizeInBits())
362  B.buildMerge(OrigRegs[0], Regs);
363  else {
364  auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
365  B.buildTrunc(OrigRegs[0], Widened);
366  }
367 
368  return;
369  }
370 
371  if (PartLLT.isVector()) {
372  assert(OrigRegs.size() == 1);
373  SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
374 
375  // If PartLLT is a mismatched vector in both number of elements and element
376  // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
377  // have the same elt type, i.e. v4s32.
378  if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
379  PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
380  Regs.size() == 1) {
381  LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
382  .changeElementCount(PartLLT.getElementCount() * 2);
383  CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
384  PartLLT = NewTy;
385  }
386 
387  if (LLTy.getScalarType() == PartLLT.getElementType()) {
388  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
389  } else {
390  unsigned I = 0;
391  LLT GCDTy = getGCDType(LLTy, PartLLT);
392 
393  // We are both splitting a vector, and bitcasting its element types. Cast
394  // the source pieces into the appropriate number of pieces with the result
395  // element type.
396  for (Register SrcReg : CastRegs)
397  CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
398  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
399  }
400 
401  return;
402  }
403 
404  assert(LLTy.isVector() && !PartLLT.isVector());
405 
406  LLT DstEltTy = LLTy.getElementType();
407 
408  // Pointer information was discarded. We'll need to coerce some register types
409  // to avoid violating type constraints.
410  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
411 
412  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
413 
414  if (DstEltTy == PartLLT) {
415  // Vector was trivially scalarized.
416 
417  if (RealDstEltTy.isPointer()) {
418  for (Register Reg : Regs)
419  MRI.setType(Reg, RealDstEltTy);
420  }
421 
422  B.buildBuildVector(OrigRegs[0], Regs);
423  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
424  // Deal with vector with 64-bit elements decomposed to 32-bit
425  // registers. Need to create intermediate 64-bit elements.
426  SmallVector<Register, 8> EltMerges;
427  int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
428 
429  assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
430 
431  for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
432  auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
433  // Fix the type in case this is really a vector of pointers.
434  MRI.setType(Merge.getReg(0), RealDstEltTy);
435  EltMerges.push_back(Merge.getReg(0));
436  Regs = Regs.drop_front(PartsPerElt);
437  }
438 
439  B.buildBuildVector(OrigRegs[0], EltMerges);
440  } else {
441  // Vector was split, and elements promoted to a wider type.
442  // FIXME: Should handle floating point promotions.
443  LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
444  auto BV = B.buildBuildVector(BVType, Regs);
445  B.buildTrunc(OrigRegs[0], BV);
446  }
447 }
448 
449 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
450 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
451 /// contain the type of scalar value extension if necessary.
452 ///
453 /// This is used for outgoing values (vregs to physregs)
454 static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
455  Register SrcReg, LLT SrcTy, LLT PartTy,
456  unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
457  // We could just insert a regular copy, but this is unreachable at the moment.
458  assert(SrcTy != PartTy && "identical part types shouldn't reach here");
459 
460  const unsigned PartSize = PartTy.getSizeInBits();
461 
462  if (PartTy.isVector() == SrcTy.isVector() &&
463  PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
464  assert(DstRegs.size() == 1);
465  B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
466  return;
467  }
468 
469  if (SrcTy.isVector() && !PartTy.isVector() &&
470  PartSize > SrcTy.getElementType().getSizeInBits()) {
471  // Vector was scalarized, and the elements extended.
472  auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
473  for (int i = 0, e = DstRegs.size(); i != e; ++i)
474  B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
475  return;
476  }
477 
478  LLT GCDTy = getGCDType(SrcTy, PartTy);
479  if (GCDTy == PartTy) {
480  // If this already evenly divisible, we can create a simple unmerge.
481  B.buildUnmerge(DstRegs, SrcReg);
482  return;
483  }
484 
485  MachineRegisterInfo &MRI = *B.getMRI();
486  LLT DstTy = MRI.getType(DstRegs[0]);
487  LLT LCMTy = getLCMType(SrcTy, PartTy);
488 
489  const unsigned DstSize = DstTy.getSizeInBits();
490  const unsigned SrcSize = SrcTy.getSizeInBits();
491  unsigned CoveringSize = LCMTy.getSizeInBits();
492 
493  Register UnmergeSrc = SrcReg;
494 
495  if (CoveringSize != SrcSize) {
496  // For scalars, it's common to be able to use a simple extension.
497  if (SrcTy.isScalar() && DstTy.isScalar()) {
498  CoveringSize = alignTo(SrcSize, DstSize);
499  LLT CoverTy = LLT::scalar(CoveringSize);
500  UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
501  } else {
502  // Widen to the common type.
503  // FIXME: This should respect the extend type
504  Register Undef = B.buildUndef(SrcTy).getReg(0);
505  SmallVector<Register, 8> MergeParts(1, SrcReg);
506  for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
507  MergeParts.push_back(Undef);
508  UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
509  }
510  }
511 
512  // Unmerge to the original registers and pad with dead defs.
513  SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
514  for (unsigned Size = DstSize * DstRegs.size(); Size != CoveringSize;
515  Size += DstSize) {
516  UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
517  }
518 
519  B.buildUnmerge(UnmergeResults, UnmergeSrc);
520 }
521 
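/// Convenience wrapper that creates a CCState, runs determineAssignments and
/// then handleAssignments on \p Args.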
522 bool CallLowering::determineAndHandleAssignments(
523  ValueHandler &Handler, ValueAssigner &Assigner,
524  SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
525  CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
526  MachineFunction &MF = MIRBuilder.getMF();
527  const Function &F = MF.getFunction();
528  SmallVector<CCValAssign, 16> ArgLocs;
529 
530  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
531  if (!determineAssignments(Assigner, Args, CCInfo))
532  return false;
533 
534  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
535  ThisReturnReg);
536 }
537 
538 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
539  if (Flags.isSExt())
540  return TargetOpcode::G_SEXT;
541  if (Flags.isZExt())
542  return TargetOpcode::G_ZEXT;
543  return TargetOpcode::G_ANYEXT;
544 }
545 
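/// Analyze the argument list in \p Args, using \p Assigner to populate
/// \p CCInfo, splitting values that need more than one register-sized part.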
546 bool CallLowering::determineAssignments(ValueAssigner &Assigner,
547  SmallVectorImpl<ArgInfo> &Args,
548  CCState &CCInfo) const {
549  LLVMContext &Ctx = CCInfo.getContext();
550  const CallingConv::ID CallConv = CCInfo.getCallingConv();
551 
552  unsigned NumArgs = Args.size();
553  for (unsigned i = 0; i != NumArgs; ++i) {
554  EVT CurVT = EVT::getEVT(Args[i].Ty);
555 
556  MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
557 
558  // If we need to split the type over multiple regs, check it's a scenario
559  // we currently support.
560  unsigned NumParts =
561  TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
562 
563  if (NumParts == 1) {
564  // Try to use the register type if we couldn't assign the VT.
565  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
566  Args[i].Flags[0], CCInfo))
567  return false;
568  continue;
569  }
570 
571  // For incoming arguments (physregs to vregs), we could have values in
572  // physregs (or memlocs) which we want to extract and copy to vregs.
573  // During this, we might have to deal with the LLT being split across
574  // multiple regs, so we have to record this information for later.
575  //
576  // If we have outgoing args, then we have the opposite case. We have a
577  // vreg with an LLT which we want to assign to a physical location, and
578  // we might have to record that the value has to be split later.
579 
580  // We're handling an incoming arg which is split over multiple regs.
581  // E.g. passing an s128 on AArch64.
582  ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
583  Args[i].Flags.clear();
584 
585  for (unsigned Part = 0; Part < NumParts; ++Part) {
586  ISD::ArgFlagsTy Flags = OrigFlags;
587  if (Part == 0) {
588  Flags.setSplit();
589  } else {
590  Flags.setOrigAlign(Align(1));
591  if (Part == NumParts - 1)
592  Flags.setSplitEnd();
593  }
594 
595  Args[i].Flags.push_back(Flags);
596  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
597  Args[i].Flags[Part], CCInfo)) {
598  // Still couldn't assign this smaller part type for some reason.
599  return false;
600  }
601  }
602  }
603 
604  return true;
605 }
606 
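/// Use \p Handler to insert the code that moves each assigned value between
/// its IR-level virtual registers and the physical registers or stack slots
/// chosen by the assigner.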
607 bool CallLowering::handleAssignments(ValueHandler &Handler,
608  SmallVectorImpl<ArgInfo> &Args,
609  CCState &CCInfo,
610  SmallVectorImpl<CCValAssign> &ArgLocs,
611  MachineIRBuilder &MIRBuilder,
612  Register ThisReturnReg) const {
613  MachineFunction &MF = MIRBuilder.getMF();
614  MachineRegisterInfo &MRI = MF.getRegInfo();
615  const Function &F = MF.getFunction();
616  const DataLayout &DL = F.getParent()->getDataLayout();
617 
618  const unsigned NumArgs = Args.size();
619 
620  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
621  assert(j < ArgLocs.size() && "Skipped too many arg locs");
622  CCValAssign &VA = ArgLocs[j];
623  assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
624 
625  if (VA.needsCustom()) {
626  unsigned NumArgRegs =
627  Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
628  if (!NumArgRegs)
629  return false;
630  j += NumArgRegs;
631  continue;
632  }
633 
634  const MVT ValVT = VA.getValVT();
635  const MVT LocVT = VA.getLocVT();
636 
637  const LLT LocTy(LocVT);
638  const LLT ValTy(ValVT);
639  const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
640  const EVT OrigVT = EVT::getEVT(Args[i].Ty);
641  const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
642 
643  // Expected to be multiple regs for a single incoming arg.
644  // There should be Regs.size() ArgLocs per argument.
645  // This should be the same as getNumRegistersForCallingConv
646  const unsigned NumParts = Args[i].Flags.size();
647 
648  // Now split the registers into the assigned types.
649  Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
650 
651  if (NumParts != 1 || NewLLT != OrigTy) {
652  // If we can't directly assign the register, we need one or more
653  // intermediate values.
654  Args[i].Regs.resize(NumParts);
655 
656  // For each split register, create and assign a vreg that will store
657  // the incoming component of the larger value. These will later be
658  // merged to form the final vreg.
659  for (unsigned Part = 0; Part < NumParts; ++Part)
660  Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
661  }
662 
663  assert((j + (NumParts - 1)) < ArgLocs.size() &&
664  "Too many regs for number of args");
665 
666  // Coerce into outgoing value types before register assignment.
667  if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
668  assert(Args[i].OrigRegs.size() == 1);
669  buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
670  ValTy, extendOpFromFlags(Args[i].Flags[0]));
671  }
672 
673  for (unsigned Part = 0; Part < NumParts; ++Part) {
674  Register ArgReg = Args[i].Regs[Part];
675  // There should be Regs.size() ArgLocs per argument.
676  VA = ArgLocs[j + Part];
677  const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
678 
679  if (VA.isMemLoc() && !Flags.isByVal()) {
680  // Individual pieces may have been spilled to the stack and others
681  // passed in registers.
682 
683  // TODO: The memory size may be larger than the value we need to
684  // store. We may need to adjust the offset for big endian targets.
685  LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);
686 
687  MachinePointerInfo MPO;
688  Register StackAddr = Handler.getStackAddress(
689  MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);
690 
691  Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
692  continue;
693  }
694 
695  if (VA.isMemLoc() && Flags.isByVal()) {
696  assert(Args[i].Regs.size() == 1 &&
697  "didn't expect split byval pointer");
698 
699  if (Handler.isIncomingArgumentHandler()) {
700  // We just need to copy the frame index value to the pointer.
701  MachinePointerInfo MPO;
702  Register StackAddr = Handler.getStackAddress(
703  Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
704  MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
705  } else {
706  // For outgoing byval arguments, insert the implicit copy byval
707  // implies, such that writes in the callee do not modify the caller's
708  // value.
709  uint64_t MemSize = Flags.getByValSize();
710  int64_t Offset = VA.getLocMemOffset();
711 
712  MachinePointerInfo DstMPO;
713  Register StackAddr =
714  Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
715 
716  MachinePointerInfo SrcMPO(Args[i].OrigValue);
717  if (!Args[i].OrigValue) {
718  // We still need to accurately track the stack address space if we
719  // don't know the underlying value.
720  const LLT PtrTy = MRI.getType(StackAddr);
721  SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
722  }
723 
724  Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
725  inferAlignFromPtrInfo(MF, DstMPO));
726 
727  Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
728  inferAlignFromPtrInfo(MF, SrcMPO));
729 
730  Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
731  DstMPO, DstAlign, SrcMPO, SrcAlign,
732  MemSize, VA);
733  }
734  continue;
735  }
736 
737  assert(!VA.needsCustom() && "custom loc should have been handled already");
738 
739  if (i == 0 && ThisReturnReg.isValid() &&
740  Handler.isIncomingArgumentHandler() &&
741  isTypeIsValidForThisReturn(ValVT)) {
742  Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
743  continue;
744  }
745 
746  Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
747  }
748 
749  // Now that all pieces have been assigned, re-pack the register typed values
750  // into the original value typed registers.
751  if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
752  // Merge the split registers into the expected larger result vregs of
753  // the original call.
754  buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
755  LocTy, Args[i].Flags[0]);
756  }
757 
758  j += NumParts - 1;
759  }
760 
761  return true;
762 }
763 
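/// Load each piece of the sret-demoted return value of type \p RetTy from the
/// memory pointed to by \p DemoteReg (stack object \p FI) into \p VRegs.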
764 void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
765  ArrayRef<Register> VRegs, Register DemoteReg,
766  int FI) const {
767  MachineFunction &MF = MIRBuilder.getMF();
768  MachineRegisterInfo &MRI = MF.getRegInfo();
769  const DataLayout &DL = MF.getDataLayout();
770 
771  SmallVector<EVT, 4> SplitVTs;
772  SmallVector<uint64_t, 4> Offsets;
773  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
774 
775  assert(VRegs.size() == SplitVTs.size());
776 
777  unsigned NumValues = SplitVTs.size();
778  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
779  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
780  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
781 
782  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
783 
784  for (unsigned I = 0; I < NumValues; ++I) {
785  Register Addr;
786  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
787  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
788  MRI.getType(VRegs[I]),
789  commonAlignment(BaseAlign, Offsets[I]));
790  MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
791  }
792 }
793 
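/// Store each piece of the sret-demoted return value of type \p RetTy from
/// \p VRegs to the memory pointed to by \p DemoteReg.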
794 void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
795  ArrayRef<Register> VRegs,
796  Register DemoteReg) const {
797  MachineFunction &MF = MIRBuilder.getMF();
798  MachineRegisterInfo &MRI = MF.getRegInfo();
799  const DataLayout &DL = MF.getDataLayout();
800 
801  SmallVector<EVT, 4> SplitVTs;
802  SmallVector<uint64_t, 4> Offsets;
803  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
804 
805  assert(VRegs.size() == SplitVTs.size());
806 
807  unsigned NumValues = SplitVTs.size();
808  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
809  unsigned AS = DL.getAllocaAddrSpace();
810  LLT OffsetLLTy =
811  getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
812 
813  MachinePointerInfo PtrInfo(AS);
814 
815  for (unsigned I = 0; I < NumValues; ++I) {
816  Register Addr;
817  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
818  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
819  MRI.getType(VRegs[I]),
820  commonAlignment(BaseAlign, Offsets[I]));
821  MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
822  }
823 }
824 
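/// For a function whose return value has been demoted to memory, create the
/// hidden incoming sret pointer in \p DemoteReg and prepend it to \p SplitArgs.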
825 void CallLowering::insertSRetIncomingArgument(
826  const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
827  MachineRegisterInfo &MRI, const DataLayout &DL) const {
828  unsigned AS = DL.getAllocaAddrSpace();
829  DemoteReg = MRI.createGenericVirtualRegister(
830  LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
831 
832  Type *PtrTy = PointerType::get(F.getReturnType(), AS);
833 
834  SmallVector<EVT, 1> ValueVTs;
835  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
836 
837  // NOTE: Assume that a pointer won't get split into more than one VT.
838  assert(ValueVTs.size() == 1);
839 
840  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
841  ArgInfo::NoArgIndex);
842  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
843  DemoteArg.Flags[0].setSRet();
844  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
845 }
846 
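/// For the call described by \p CB, allocate a stack slot for the demoted
/// result and prepend the hidden sret ArgInfo to Info.OrigArgs.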
847 void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
848  const CallBase &CB,
849  CallLoweringInfo &Info) const {
850  const DataLayout &DL = MIRBuilder.getDataLayout();
851  Type *RetTy = CB.getType();
852  unsigned AS = DL.getAllocaAddrSpace();
853  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
854 
855  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
856  DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
857 
858  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
859  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
860  ArgInfo::NoArgIndex);
861  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
862  DemoteArg.Flags[0].setSRet();
863 
864  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
865  Info.DemoteStackIndex = FI;
866  Info.DemoteRegister = DemoteReg;
867 }
868 
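/// Check that \p Fn can assign a location to every return value in \p Outs
/// under the state in \p CCInfo.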
869 bool CallLowering::checkReturn(CCState &CCInfo,
870  SmallVectorImpl<BaseArgInfo> &Outs,
871  CCAssignFn *Fn) const {
872  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
873  MVT VT = MVT::getVT(Outs[I].Ty);
874  if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
875  return false;
876  }
877  return true;
878 }
879 
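/// Compute the per-register-part BaseArgInfo list for a return of type
/// \p RetTy under \p CallConv, taking the return attributes \p Attrs into
/// account.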
880 void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
881  AttributeList Attrs,
882  SmallVectorImpl<BaseArgInfo> &Outs,
883  const DataLayout &DL) const {
884  LLVMContext &Context = RetTy->getContext();
885  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
886 
887  SmallVector<EVT, 4> SplitVTs;
888  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
889  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
890 
891  for (EVT VT : SplitVTs) {
892  unsigned NumParts =
893  TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
894  MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
895  Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
896 
897  for (unsigned I = 0; I < NumParts; ++I) {
898  Outs.emplace_back(PartTy, Flags);
899  }
900  }
901 }
902 
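/// Toplevel helper to check whether the return type of \p MF can be lowered
/// for its calling convention.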
903 bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
904  const auto &F = MF.getFunction();
905  Type *ReturnType = F.getReturnType();
906  CallingConv::ID CallConv = F.getCallingConv();
907 
908  SmallVector<BaseArgInfo, 4> SplitArgs;
909  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
910  MF.getDataLayout());
911  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
912 }
913 
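/// Check that outgoing arguments assigned to callee-saved registers are only
/// plain copies of the corresponding incoming values, a requirement for tail
/// calls.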
914 bool CallLowering::parametersInCSRMatch(
915  const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
916  const SmallVectorImpl<CCValAssign> &OutLocs,
917  const SmallVectorImpl<ArgInfo> &OutArgs) const {
918  for (unsigned i = 0; i < OutLocs.size(); ++i) {
919  auto &ArgLoc = OutLocs[i];
920  // If it's not a register, it's fine.
921  if (!ArgLoc.isRegLoc())
922  continue;
923 
924  MCRegister PhysReg = ArgLoc.getLocReg();
925 
926  // Only look at callee-saved registers.
927  if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
928  continue;
929 
930  LLVM_DEBUG(
931  dbgs()
932  << "... Call has an argument passed in a callee-saved register.\n");
933 
934  // Check if it was copied from.
935  const ArgInfo &OutInfo = OutArgs[i];
936 
937  if (OutInfo.Regs.size() > 1) {
938  LLVM_DEBUG(
939  dbgs() << "... Cannot handle arguments in multiple registers.\n");
940  return false;
941  }
942 
943  // Check if we copy the register, walking through copies from virtual
944  // registers. Note that getDefIgnoringCopies does not ignore copies from
945  // physical registers.
946  MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
947  if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
948  LLVM_DEBUG(
949  dbgs()
950  << "... Parameter was not copied into a VReg, cannot tail call.\n");
951  return false;
952  }
953 
954  // Got a copy. Verify that it's the same as the register we want.
955  Register CopyRHS = RegDef->getOperand(1).getReg();
956  if (CopyRHS != PhysReg) {
957  LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
958  "VReg, cannot tail call.\n");
959  return false;
960  }
961  }
962 
963  return true;
964 }
965 
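/// Check whether the callee's and caller's calling conventions assign the
/// values in \p InArgs to identical locations, as required for a tail call.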
966 bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
967  MachineFunction &MF,
968  SmallVectorImpl<ArgInfo> &InArgs,
969  ValueAssigner &CalleeAssigner,
970  ValueAssigner &CallerAssigner) const {
971  const Function &F = MF.getFunction();
972  CallingConv::ID CalleeCC = Info.CallConv;
973  CallingConv::ID CallerCC = F.getCallingConv();
974 
975  if (CallerCC == CalleeCC)
976  return true;
977 
978  SmallVector<CCValAssign, 16> ArgLocs1;
979  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
980  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
981  return false;
982 
983  SmallVector<CCValAssign, 16> ArgLocs2;
984  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
985  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
986  return false;
987 
988  // We need the argument locations to match up exactly. If there's more in
989  // one than the other, then we are done.
990  if (ArgLocs1.size() != ArgLocs2.size())
991  return false;
992 
993  // Make sure that each location is passed in exactly the same way.
994  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
995  const CCValAssign &Loc1 = ArgLocs1[i];
996  const CCValAssign &Loc2 = ArgLocs2[i];
997 
998  // We need both of them to be the same. So if one is a register and one
999  // isn't, we're done.
1000  if (Loc1.isRegLoc() != Loc2.isRegLoc())
1001  return false;
1002 
1003  if (Loc1.isRegLoc()) {
1004  // If they don't have the same register location, we're done.
1005  if (Loc1.getLocReg() != Loc2.getLocReg())
1006  return false;
1007 
1008  // They matched, so we can move to the next ArgLoc.
1009  continue;
1010  }
1011 
1012  // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1013  if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1014  return false;
1015  }
1016 
1017  return true;
1018 }
1019 
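/// Return the LLT to use when storing or loading the value for \p VA on the
/// stack, restoring pointer types that CCValAssign cannot represent.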
1020 LLT CallLowering::ValueHandler::getStackValueStoreType(
1021  const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
1022  const MVT ValVT = VA.getValVT();
1023  if (ValVT != MVT::iPTR) {
1024  LLT ValTy(ValVT);
1025 
1026  // We lost the pointeriness going through CCValAssign, so try to restore it
1027  // based on the flags.
1028  if (Flags.isPointer()) {
1029  LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1030  ValTy.getScalarSizeInBits());
1031  if (ValVT.isVector())
1032  return LLT::vector(ValTy.getElementCount(), PtrTy);
1033  return PtrTy;
1034  }
1035 
1036  return ValTy;
1037  }
1038 
1039  unsigned AddrSpace = Flags.getPointerAddrSpace();
1040  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1041 }
1042 
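/// Emit the implicit memcpy for a byval argument: copy \p MemSize bytes from
/// \p SrcPtr to \p DstPtr with appropriate memory operands.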
1043 void CallLowering::ValueHandler::copyArgumentMemory(
1044  const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1045  const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1046  const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1047  CCValAssign &VA) const {
1048  MachineFunction &MF = MIRBuilder.getMF();
1049  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
1050  SrcPtrInfo,
1051  MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
1052  SrcAlign);
1053 
1054  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
1055  DstPtrInfo,
1056  MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
1057  MemSize, DstAlign);
1058 
1059  const LLT PtrTy = MRI.getType(DstPtr);
1060  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1061 
1062  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1063  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1064 }
1065 
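/// Extend \p ValReg to the location type in \p VA, capped at extending to at
/// most \p MaxSizeBits bits, using the extension kind recorded in the
/// assignment.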
1066 Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1067  CCValAssign &VA,
1068  unsigned MaxSizeBits) {
1069  LLT LocTy{VA.getLocVT()};
1070  LLT ValTy{VA.getValVT()};
1071 
1072  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1073  return ValReg;
1074 
1075  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1076  if (MaxSizeBits <= ValTy.getSizeInBits())
1077  return ValReg;
1078  LocTy = LLT::scalar(MaxSizeBits);
1079  }
1080 
1081  const LLT ValRegTy = MRI.getType(ValReg);
1082  if (ValRegTy.isPointer()) {
1083  // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1084  // we have to cast to do the extension.
1085  LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1086  ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1087  }
1088 
1089  switch (VA.getLocInfo()) {
1090  default: break;
1091  case CCValAssign::Full:
1092  case CCValAssign::BCvt:
1093  // FIXME: bitconverting between vector types may or may not be a
1094  // nop in big-endian situations.
1095  return ValReg;
1096  case CCValAssign::AExt: {
1097  auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1098  return MIB.getReg(0);
1099  }
1100  case CCValAssign::SExt: {
1101  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1102  MIRBuilder.buildSExt(NewReg, ValReg);
1103  return NewReg;
1104  }
1105  case CCValAssign::ZExt: {
1106  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1107  MIRBuilder.buildZExt(NewReg, ValReg);
1108  return NewReg;
1109  }
1110  }
1111  llvm_unreachable("unable to extend register");
1112 }
1113 
1114 void CallLowering::ValueAssigner::anchor() {}
1115 
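/// Insert a G_ASSERT_SEXT/G_ASSERT_ZEXT hint on \p SrcReg when the assignment
/// guarantees the incoming value was extended, so later passes can rely on it.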
1116 Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
1117  Register SrcReg,
1118  LLT NarrowTy) {
1119  switch (VA.getLocInfo()) {
1120  case CCValAssign::LocInfo::ZExt: {
1121  return MIRBuilder
1122  .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1123  NarrowTy.getScalarSizeInBits())
1124  .getReg(0);
1125  }
1126  case CCValAssign::LocInfo::SExt: {
1127  return MIRBuilder
1128  .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1129  NarrowTy.getScalarSizeInBits())
1130  .getReg(0);
1131  break;
1132  }
1133  default:
1134  return SrcReg;
1135  }
1136 }
1137 
1138 /// Check if we can use a basic COPY instruction between the two types.
1139 ///
1140 /// We're currently building on top of the infrastructure using MVT, which loses
1141 /// pointer information in the CCValAssign. We accept copies from physical
1142 /// registers that have been reported as integers if it's to an equivalent sized
1143 /// pointer LLT.
1144 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1145  if (SrcTy == DstTy)
1146  return true;
1147 
1148  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1149  return false;
1150 
1151  SrcTy = SrcTy.getScalarType();
1152  DstTy = DstTy.getScalarType();
1153 
1154  return (SrcTy.isPointer() && DstTy.isScalar()) ||
1155  (DstTy.isPointer() && SrcTy.isScalar());
1156 }
1157 
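/// Copy an incoming physical register into its value register, truncating or
/// reinterpreting it when the location type differs from the value type.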
1158 void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
1159  Register PhysReg,
1160  CCValAssign &VA) {
1161  const MVT LocVT = VA.getLocVT();
1162  const LLT LocTy(LocVT);
1163  const LLT RegTy = MRI.getType(ValVReg);
1164 
1165  if (isCopyCompatibleType(RegTy, LocTy)) {
1166  MIRBuilder.buildCopy(ValVReg, PhysReg);
1167  return;
1168  }
1169 
1170  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1171  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1172  MIRBuilder.buildTrunc(ValVReg, Hint);
1173 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:153
i
i
Definition: README.txt:29
llvm::ISD::ArgFlagsTy::isInAlloca
bool isInAlloca() const
Definition: TargetCallingConv.h:91
llvm::CallLowering::ValueAssigner
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:157
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:148
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:38
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::getDefIgnoringCopies
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:443
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:69
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
Reg
unsigned Reg
Definition: MachineSink.cpp:1566
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:36
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:791
llvm::ISD::ArgFlagsTy::setSwiftSelf
void setSwiftSelf()
Definition: TargetCallingConv.h:98
llvm::LLT::getScalarSizeInBits
unsigned getScalarSizeInBits() const
Definition: LowLevelTypeImpl.h:213
llvm::ISD::ArgFlagsTy::setNest
void setNest()
Definition: TargetCallingConv.h:119
llvm::CallLowering::handleAssignments
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, Register ThisReturnReg=Register()) const
Use Handler to insert code to handle the argument/return values represented by Args.
Definition: CallLowering.cpp:607
CallLowering.h
llvm::TargetLowering::functionArgumentNeedsConsecutiveRegisters
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Definition: TargetLowering.h:4041
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:191
buildCopyToRegs
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, Register SrcReg, LLT SrcTy, LLT PartTy, unsigned ExtendOp=TargetOpcode::G_ANYEXT)
Create a sequence of instructions to expand the value in SrcReg (of type SrcTy) to the types in DstRe...
Definition: CallLowering.cpp:454
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:426
llvm::Function
Definition: Function.h:61
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:691
llvm::ISD::ArgFlagsTy::setMemAlign
void setMemAlign(Align A)
Definition: TargetCallingConv.h:148
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::MVT::isVector
bool isVector() const
Return true if this is a vector value type.
Definition: MachineValueType.h:366
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:430
llvm::CallLowering::ValueHandler::extendRegister
Register extendRegister(Register ValReg, CCValAssign &VA, unsigned MaxSizeBits=0)
Extend a register to the location type given in VA, capped at extending to at most MaxSize bits.
Definition: CallLowering.cpp:1066
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:734
llvm::CallLowering::ValueHandler
Definition: CallLowering.h:225
llvm::CallLowering::insertSRetOutgoingArgument
void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, const CallBase &CB, CallLoweringInfo &Info) const
For the call-base described by CB, insert the hidden sret ArgInfo to the OrigArgs field of Info.
Definition: CallLowering.cpp:847
llvm::ISD::ArgFlagsTy::setPointer
void setPointer()
Definition: TargetCallingConv.h:142
llvm::LLT::getScalarType
LLT getScalarType() const
Definition: LowLevelTypeImpl.h:168
llvm::ISD::ArgFlagsTy::isZExt
bool isZExt() const
Definition: TargetCallingConv.h:73
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::CallLowering::ValueHandler::assignValueToReg
virtual void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA)=0
The specified value has been assigned to a physical register, handle the appropriate COPY (either to ...
Module.h
llvm::AttributeList
Definition: Attributes.h:398
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1468
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:128
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1241
llvm::MachineMemOperand::MODereferenceable
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
Definition: MachineMemOperand.h:143
extendOpFromFlags
static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)
Definition: CallLowering.cpp:538
llvm::CallBase::isMustTailCall
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Definition: Instructions.cpp:298
llvm::CallLowering::splitToValueTypes
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
Definition: CallLowering.cpp:211
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:46
llvm::LLT::changeElementCount
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
Definition: LowLevelTypeImpl.h:190
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::LLT::vector
static LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:57
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:139
MachineIRBuilder.h
buildCopyFromRegs
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef< Register > OrigRegs, ArrayRef< Register > Regs, LLT LLTy, LLT PartLLT, const ISD::ArgFlagsTy Flags)
Create a sequence of instructions to combine pieces split into register typed values to the original ...
Definition: CallLowering.cpp:307
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::MachineIRBuilder::buildConstant
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Definition: MachineIRBuilder.cpp:255
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::FunctionType::isVarArg
bool isVarArg() const
Definition: DerivedTypes.h:123
llvm::ISD::ArgFlagsTy::isSwiftSelf
bool isSwiftSelf() const
Definition: TargetCallingConv.h:97
MachineRegisterInfo.h
llvm::ComputeValueVTs
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:124
llvm::getLLTForType
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
Definition: LowLevelType.cpp:21
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::LLT::fixed_vector
static LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:75
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
TargetLowering.h
llvm::getGCDType
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:857
llvm::CallLowering::getTLI
const TargetLowering * getTLI() const
Getter for generic TargetLowering class.
Definition: CallLowering.h:333
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:636
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:39
llvm::CallLowering::resultsCompatible
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
Definition: CallLowering.cpp:966
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:33
TargetMachine.h
llvm::MachineIRBuilder::buildZExt
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
Definition: MachineIRBuilder.cpp:424
llvm::MachineIRBuilder::buildLoad
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
Definition: MachineIRBuilder.h:832
llvm::ISD::ArgFlagsTy::setReturned
void setReturned(bool V=true)
Definition: TargetCallingConv.h:122
llvm::MachineRegisterInfo::setType
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Definition: MachineRegisterInfo.cpp:182
llvm::CallLowering::checkReturnTypeForCallConv
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
Definition: CallLowering.cpp:903
llvm::ISD::ArgFlagsTy::isByVal
bool isByVal() const
Definition: TargetCallingConv.h:85
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:150
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::CallLowering::ArgInfo
Definition: CallLowering.h:61
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:499
llvm::MachineIRBuilder::buildPtrToInt
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
Definition: MachineIRBuilder.h:623
mergeVectorRegsToResultRegs
static MachineInstrBuilder mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, ArrayRef< Register > SrcRegs)
Pack values SrcRegs to cover the vector type result DstRegs.
Definition: CallLowering.cpp:251
llvm::LLT::getSizeInBits
TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelTypeImpl.h:153
llvm::CallLowering::ValueHandler::getStackValueStoreType
virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const
Return the in-memory size to write for the argument at VA.
Definition: CallLowering.cpp:1020
Utils.h
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineIRBuilder::getDataLayout
const DataLayout & getDataLayout() const
Definition: MachineIRBuilder.h:272
llvm::Function::getFnAttribute
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:652
llvm::CallLowering::determineAssignments
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
Definition: CallLowering.cpp:546
llvm::MachineIRBuilder::getMF
MachineFunction & getMF()
Getter for the function we currently build.
Definition: MachineIRBuilder.h:262
llvm::CCState::getContext
LLVMContext & getContext() const
Definition: CallingConvLower.h:257
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::CallLowering::ValueHandler::assignCustomValue
virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef< CCValAssign > VAs)
Handle custom values, which may be passed into one or more of VAs.
Definition: CallLowering.h:288
llvm::ISD::ArgFlagsTy::setInReg
void setInReg()
Definition: TargetCallingConv.h:80
llvm::CCValAssign::getLocInfo
LocInfo getLocInfo() const
Definition: CallingConvLower.h:155
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::CCValAssign::getLocMemOffset
unsigned getLocMemOffset() const
Definition: CallingConvLower.h:151
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1449
llvm::Attribute::getValueAsString
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:301
llvm::EVT::getTypeForEVT
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:181
llvm::CCValAssign::isRegLoc
bool isRegLoc() const
Definition: CallingConvLower.h:145
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::MachineInstrBuilder::getReg
Register getReg(unsigned Idx) const
Get the register for the operand index.
Definition: MachineInstrBuilder.h:94
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::ISD::ArgFlagsTy::setSExt
void setSExt()
Definition: TargetCallingConv.h:77
llvm::Function::getAttributes
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:326
llvm::CCAssignFn
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
Definition: CallingConvLower.h:177
llvm::LLT::pointer
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelTypeImpl.h:50
llvm::LLT::getAddressSpace
unsigned getAddressSpace() const
Definition: LowLevelTypeImpl.h:227
llvm::getLCMType
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:811
llvm::CallLowering::isTypeIsValidForThisReturn
virtual bool isTypeIsValidForThisReturn(EVT Ty) const
For targets which support the "returned" parameter attribute, returns true if the given type is a val...
Definition: CallLowering.h:583
llvm::CallLowering::ArgInfo::NoArgIndex
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
Definition: CallLowering.h:78
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:401
llvm::ISD::ArgFlagsTy::setSplit
void setSplit()
Definition: TargetCallingConv.h:133
llvm::MachineOperand::clobbersPhysReg
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
Definition: MachineOperand.h:617
llvm::TargetLoweringBase::getNumRegistersForCallingConv
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
Definition: TargetLowering.h:1530
llvm::CallBase::isTailCall
bool isTailCall() const
Tests if this call site is marked as a tail call.
Definition: Instructions.cpp:305
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:212
llvm::CCValAssign::SExt
@ SExt
Definition: CallingConvLower.h:37
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
llvm::MachineIRBuilder::buildAssertSExt
MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_SEXT Op, Size.
Definition: MachineIRBuilder.cpp:243
uint64_t
llvm::CallLowering::checkReturn
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
Definition: CallLowering.cpp:869
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
llvm::MachineIRBuilder::buildAssertZExt
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
Definition: MachineIRBuilder.cpp:249
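A hedged usage sketch for the two assert-extension builders above, assuming MIRBuilder and a wide s32 vreg WideReg exist in a surrounding value handler; the s32/s8 types are illustrative:

  LLT S32 = LLT::scalar(32);
  LLT S8 = LLT::scalar(8);
  // Hint that only the low 8 bits of WideReg carry zero-extended data.
  auto Hint = MIRBuilder.buildAssertZExt(S32, WideReg, /*Size=*/8);
  // Produce the narrow value the caller actually wants.
  Register Narrow = MIRBuilder.buildTrunc(S8, Hint).getReg(0);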
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:38
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::inferAlignFromPtrInfo
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:647
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:558
I
#define I(x, y, z)
Definition: MD5.cpp:59
Analysis.h
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:71
llvm::LLT::isVector
bool isVector() const
Definition: LowLevelTypeImpl.h:123
llvm::LLT::getNumElements
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelTypeImpl.h:127
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:632
llvm::ISD::ArgFlagsTy::setOrigAlign
void setOrigAlign(Align A)
Definition: TargetCallingConv.h:164
llvm::CCState::getCallingConv
CallingConv::ID getCallingConv() const
Definition: CallingConvLower.h:259
assert
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
llvm::LLT::isPointer
bool isPointer() const
Definition: LowLevelTypeImpl.h:121
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:642
llvm::ISD::ArgFlagsTy::setSwiftError
void setSwiftError()
Definition: TargetCallingConv.h:104
llvm::ISD::ArgFlagsTy::getPointerAddrSpace
unsigned getPointerAddrSpace() const
Definition: TargetCallingConv.h:187
isCopyCompatibleType
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy)
Check if we can use a basic COPY instruction between the two types.
Definition: CallLowering.cpp:1144
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:83
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:188
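Minimal sketch, assuming MRI is the current function's MachineRegisterInfo; the s64 type is illustrative:

  // Fresh generic virtual register holding a 64-bit scalar.
  Register NewVReg = MRI.createGenericVirtualRegister(LLT::scalar(64));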
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::ISD::ArgFlagsTy::setByVal
void setByVal()
Definition: TargetCallingConv.h:86
llvm::LLT::isScalar
bool isScalar() const
Definition: LowLevelTypeImpl.h:119
llvm::ARM::WinEH::ReturnType
ReturnType
Definition: ARMWinEH.h:25
llvm::CallLowering::addArgFlagsFromAttributes
void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, const AttributeList &Attrs, unsigned OpIdx) const
Adds flags to Flags based off of the attributes in Attrs.
Definition: CallLowering.cpp:72
llvm::MachineFunction
Definition: MachineFunction.h:230
addFlagsUsingAttrFn
static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, const std::function< bool(Attribute::AttrKind)> &AttrFn)
Helper function which updates Flags when AttrFn returns true.
Definition: CallLowering.cpp:35
llvm::ISD::ArgFlagsTy::setByValSize
void setByValSize(unsigned S)
Definition: TargetCallingConv.h:173
llvm::CCValAssign::getValNo
unsigned getValNo() const
Definition: CallingConvLower.h:142
llvm::CallLowering::ValueHandler::copyArgumentMemory
void copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr, const MachinePointerInfo &DstPtrInfo, Align DstAlign, const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, CCValAssign &VA) const
Do a memory copy of MemSize bytes from SrcPtr to DstPtr.
Definition: CallLowering.cpp:1043
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineOperand::CreateGA
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Definition: MachineOperand.h:850
DataLayout.h
llvm::MachineFrameInfo::CreateStackObject
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Definition: MachineFrameInfo.cpp:51
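A hedged sketch of the usual pairing with buildFrameIndex (listed further below): allocate a slot and materialize its address. MF and MIRBuilder are assumed from the surrounding lowering code, and the size, alignment and pointer width are placeholder values:

  MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = MFI.CreateStackObject(/*Size=*/16, Align(8), /*isSpillSlot=*/false);
  LLT FramePtrTy = LLT::pointer(/*AddressSpace=*/0, /*SizeInBits=*/64);
  Register AddrReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);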
llvm::CallLowering::ValueHandler::getStackAddress
virtual Register getStackAddress(uint64_t MemSize, int64_t Offset, MachinePointerInfo &MPO, ISD::ArgFlagsTy Flags)=0
Materialize a VReg containing the address of the specified stack-based object.
llvm::ISD::ArgFlagsTy::setInAlloca
void setInAlloca()
Definition: TargetCallingConv.h:92
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:489
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::CallLowering::insertSRetLoads
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
Definition: CallLowering.cpp:764
llvm::MachineIRBuilder::buildCopy
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
Definition: MachineIRBuilder.cpp:238
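Sketch: copy an incoming physical register into a fresh generic vreg. PhysReg is an assumed physical register (e.g. produced by the calling-convention assignment) and s64 is illustrative:

  LLT S64 = LLT::scalar(64);
  Register Dst = MRI.createGenericVirtualRegister(S64);
  MIRBuilder.buildCopy(Dst, Register(PhysReg)); // COPY physreg -> vreg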
llvm::ISD::ArgFlagsTy::setSplitEnd
void setSplitEnd()
Definition: TargetCallingConv.h:136
uint32_t
llvm::ISD::ArgFlagsTy::isPreallocated
bool isPreallocated() const
Definition: TargetCallingConv.h:94
llvm::ISD::ArgFlagsTy
Definition: TargetCallingConv.h:27
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::ISD::ArgFlagsTy::setPointerAddrSpace
void setPointerAddrSpace(unsigned AS)
Definition: TargetCallingConv.h:188
llvm::MVT::iPTR
@ iPTR
Definition: MachineValueType.h:312
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:127
llvm::LLT::changeElementType
LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelTypeImpl.h:174
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MachineIRBuilder::buildAnyExt
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Definition: MachineIRBuilder.cpp:414
llvm::MachineIRBuilder::materializePtrAdd
Optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
Definition: MachineIRBuilder.cpp:193
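Hedged sketch: advance a byte pointer by a constant offset. BasePtr is assumed; when Value is 0 the helper returns None and simply forwards BasePtr into Res:

  Register Res;
  LLT OffsetTy = LLT::scalar(64); // type of the G_CONSTANT offset
  auto MIB = MIRBuilder.materializePtrAdd(Res, BasePtr, OffsetTy, /*Value=*/8);
  // Res now addresses BasePtr + 8 bytes.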
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:675
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
NumFixedArgs
static unsigned NumFixedArgs
Definition: LanaiISelLowering.cpp:368
llvm::MachineIRBuilder::buildFrameIndex
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
Definition: MachineIRBuilder.cpp:137
llvm::CallLowering::insertSRetIncomingArgument
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo at the beginning of SplitArgs.
Definition: CallLowering.cpp:825
llvm::CallLowering::getAttributesForArgIdx
ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call, unsigned ArgIdx) const
Definition: CallLowering.cpp:63
j
return j(j << 16)
llvm::CallLowering::canLowerReturn
virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const
This hook must be implemented to check whether the return values described by Outs can fit into the r...
Definition: CallLowering.h:483
llvm::CCValAssign::isMemLoc
bool isMemLoc() const
Definition: CallingConvLower.h:146
llvm::CallLowering::insertSRetStores
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into the stack, starting at the offset specified in DemoteReg.
Definition: CallLowering.cpp:794
llvm::commonAlignment
Align commonAlignment(Align A, Align B)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:211
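A small illustrative example of the semantics (values are arbitrary): the result is the largest alignment that both inputs guarantee.

  Align A = commonAlignment(Align(16), Align(4)); // A == Align(4)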
llvm::ISD::ArgFlagsTy::getByValSize
unsigned getByValSize() const
Definition: TargetCallingConv.h:169
llvm::MachineIRBuilder::buildTrunc
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
Definition: MachineIRBuilder.cpp:737
llvm::MachineIRBuilder::buildMemCpy
MachineInstrBuilder buildMemCpy(const SrcOp &DstPtr, const SrcOp &SrcPtr, const SrcOp &Size, MachineMemOperand &DstMMO, MachineMemOperand &SrcMMO)
Definition: MachineIRBuilder.h:1840
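Hedged sketch of an 8-byte copy between two pointer vregs. DstPtr, SrcPtr, DstPtrInfo and SrcPtrInfo are assumed from the surrounding code; sizes and alignments are placeholders:

  Register SizeReg = MIRBuilder.buildConstant(LLT::scalar(64), 8).getReg(0);
  auto *DstMMO = MF.getMachineMemOperand(DstPtrInfo, MachineMemOperand::MOStore,
                                         /*Size=*/8, Align(1));
  auto *SrcMMO = MF.getMachineMemOperand(SrcPtrInfo, MachineMemOperand::MOLoad,
                                         /*Size=*/8, Align(1));
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeReg, *DstMMO, *SrcMMO);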
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:592
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:622
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:153
llvm::ArrayRef::take_front
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:228
llvm::LLT::getSizeInBytes
TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelTypeImpl.h:163
llvm::CallLowering::IncomingValueHandler::assignValueToReg
void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override
Provides a default implementation for argument handling.
Definition: CallLowering.cpp:1158
llvm::CallLowering::determineAndHandleAssignments
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg=Register()) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...
Definition: CallLowering.cpp:522
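A hedged sketch of a typical call site in a target's formal-argument lowering. MyIncomingValueAssigner, MyIncomingValueHandler and CC_MyTarget are hypothetical target-specific names; SplitArgs, F, MIRBuilder and MRI come from the surrounding code:

  MyIncomingValueAssigner Assigner(CC_MyTarget);
  MyIncomingValueHandler Handler(MIRBuilder, MRI);
  if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
                                     F.getCallingConv(), F.isVarArg()))
    return false;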
llvm::ISD::ArgFlagsTy::isSExt
bool isSExt() const
Definition: TargetCallingConv.h:76
llvm::CallLowering::ArgInfo::Regs
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:62
llvm::ISD::ArgFlagsTy::isPointer
bool isPointer() const
Definition: TargetCallingConv.h:141
llvm::CallLowering::BaseArgInfo::Ty
Type * Ty
Definition: CallLowering.h:49
llvm::CallLowering::CallLoweringInfo
Definition: CallLowering.h:101
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:738
llvm::CallLowering::IncomingValueHandler::buildExtensionHint
Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy)
Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on VA, returning the new register ...
Definition: CallLowering.cpp:1116
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1386
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:137
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:476
Instructions.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1003
llvm::MachineRegisterInfo::getType
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Definition: MachineRegisterInfo.h:732
llvm::MachineRegisterInfo::cloneVirtualRegister
Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
Definition: MachineRegisterInfo.cpp:172
llvm::ISD::ArgFlagsTy::getNonZeroByValAlign
Align getNonZeroByValAlign() const
Definition: TargetCallingConv.h:153
llvm::MachineIRBuilder::buildSExt
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
Definition: MachineIRBuilder.cpp:419
llvm::CCValAssign::getValVT
MVT getValVT() const
Definition: CallingConvLower.h:143
llvm::LLT::getElementCount
ElementCount getElementCount() const
Definition: LowLevelTypeImpl.h:144
llvm::ISD::ArgFlagsTy::setSRet
void setSRet()
Definition: TargetCallingConv.h:83
llvm::Register::isValid
bool isValid() const
Definition: Register.h:126
llvm::TargetLoweringBase::getRegisterTypeForCallingConv
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
Definition: TargetLowering.h:1522
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MVT::getVT
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:526
llvm::isInTailCallPosition
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:501
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:260
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::CallLowering::getReturnInfo
void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl< BaseArgInfo > &Outs, const DataLayout &DL) const
Get the type and the ArgFlags for the split components of RetTy as returned by ComputeValueVTs.
Definition: CallLowering.cpp:880
MachineOperand.h
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1161
llvm::CallLowering::BaseArgInfo::IsFixed
bool IsFixed
Definition: CallLowering.h:51
llvm::ISD::ArgFlagsTy::setPreallocated
void setPreallocated()
Definition: TargetCallingConv.h:95
llvm::CallLowering::ArgInfo::OrigValue
const Value * OrigValue
Optionally track the original IR value for the argument.
Definition: CallLowering.h:72
LLVMContext.h
llvm::CallLowering::ValueAssigner::assignArg
virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, const ArgInfo &Info, ISD::ArgFlagsTy Flags, CCState &State)
Wrap a call to the assignment function (typically a tablegenerated CCAssignFn).
Definition: CallLowering.h:181
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::MachineIRBuilder::buildStore
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
Definition: MachineIRBuilder.cpp:387
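Hedged sketch: store a value to a fixed stack slot FI, using MachinePointerInfo::getFixedStack (listed above) for the memory operand. MF, MIRBuilder, ValReg and FI are assumed; the 8-byte size/alignment and 64-bit pointer are placeholders:

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                      /*Size=*/8, Align(8));
  Register Addr = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI).getReg(0);
  MIRBuilder.buildStore(ValReg, Addr, *MMO);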
llvm::LLT::getElementType
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelTypeImpl.h:237
llvm::SI::KernelInputOffsets::Offsets
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1263
llvm::CallLowering::ArgInfo::OrigArgIndex
unsigned OrigArgIndex
Index of the original Function's argument.
Definition: CallLowering.h:75
llvm::CallLowering::parametersInCSRMatch
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
Definition: CallLowering.cpp:914
copy
Definition: README.txt:101
llvm::LLT::scalar
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelTypeImpl.h:43
llvm::ISD::ArgFlagsTy::setSwiftAsync
void setSwiftAsync()
Definition: TargetCallingConv.h:101
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::AttributeList::FirstArgIndex
@ FirstArgIndex
Definition: Attributes.h:403
llvm::CallLowering::ValueHandler::isIncomingArgumentHandler
bool isIncomingArgumentHandler() const
Returns true if the handler is dealing with incoming arguments, i.e.
Definition: CallLowering.h:239
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:154
llvm::CCValAssign::needsCustom
bool needsCustom() const
Definition: CallingConvLower.h:148
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1319
llvm::ISD::ArgFlagsTy::setZExt
void setZExt()
Definition: TargetCallingConv.h:74
getReg
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
Definition: MipsDisassembler.cpp:580
llvm::CallLowering::ValueHandler::assignValueToAddress
virtual void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, MachinePointerInfo &MPO, CCValAssign &VA)=0
The specified value has been assigned to a stack location.
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:23
llvm::SmallVectorImpl::insert
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:773
llvm::CallLowering::BaseArgInfo::Flags
SmallVector< ISD::ArgFlagsTy, 4 > Flags
Definition: CallLowering.h:50
llvm::LLT
Definition: LowLevelTypeImpl.h:40
llvm::CallLowering::lowerCall
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:543
llvm::CallLowering::setArgFlags
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
Definition: CallLowering.cpp:153