LLVM  14.0.0git
CallLowering.cpp
Go to the documentation of this file.
1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Module.h"
27 
28 #define DEBUG_TYPE "call-lowering"
29 
30 using namespace llvm;
31 
// Out-of-line virtual-method anchor: pins CallLowering's vtable emission to
// this translation unit (standard LLVM "anchor" idiom).
void CallLowering::anchor() {}
33 
34 /// Helper function which updates \p Flags when \p AttrFn returns true.
35 static void
37  const std::function<bool(Attribute::AttrKind)> &AttrFn) {
38  if (AttrFn(Attribute::SExt))
39  Flags.setSExt();
40  if (AttrFn(Attribute::ZExt))
41  Flags.setZExt();
42  if (AttrFn(Attribute::InReg))
43  Flags.setInReg();
44  if (AttrFn(Attribute::StructRet))
45  Flags.setSRet();
46  if (AttrFn(Attribute::Nest))
47  Flags.setNest();
48  if (AttrFn(Attribute::ByVal))
49  Flags.setByVal();
50  if (AttrFn(Attribute::Preallocated))
51  Flags.setPreallocated();
52  if (AttrFn(Attribute::InAlloca))
53  Flags.setInAlloca();
54  if (AttrFn(Attribute::Returned))
55  Flags.setReturned();
56  if (AttrFn(Attribute::SwiftSelf))
57  Flags.setSwiftSelf();
58  if (AttrFn(Attribute::SwiftAsync))
59  Flags.setSwiftAsync();
60  if (AttrFn(Attribute::SwiftError))
61  Flags.setSwiftError();
62 }
63 
65  unsigned ArgIdx) const {
66  ISD::ArgFlagsTy Flags;
67  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
68  return Call.paramHasAttr(ArgIdx, Attr);
69  });
70  return Flags;
71 }
72 
74  const AttributeList &Attrs,
75  unsigned OpIdx) const {
76  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
77  return Attrs.hasAttributeAtIndex(OpIdx, Attr);
78  });
79 }
80 
82  ArrayRef<Register> ResRegs,
84  Register SwiftErrorVReg,
85  std::function<unsigned()> GetCalleeReg) const {
87  const DataLayout &DL = MIRBuilder.getDataLayout();
88  MachineFunction &MF = MIRBuilder.getMF();
89  bool CanBeTailCalled = CB.isTailCall() &&
90  isInTailCallPosition(CB, MF.getTarget()) &&
91  (MF.getFunction()
92  .getFnAttribute("disable-tail-calls")
93  .getValueAsString() != "true");
94 
95  CallingConv::ID CallConv = CB.getCallingConv();
96  Type *RetTy = CB.getType();
97  bool IsVarArg = CB.getFunctionType()->isVarArg();
98 
100  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
101  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
102 
103  if (!Info.CanLowerReturn) {
104  // Callee requires sret demotion.
105  insertSRetOutgoingArgument(MIRBuilder, CB, Info);
106 
107  // The sret demotion isn't compatible with tail-calls, since the sret
108  // argument points into the caller's stack frame.
109  CanBeTailCalled = false;
110  }
111 
112  // First step is to marshall all the function's parameters into the correct
113  // physregs and memory locations. Gather the sequence of argument types that
114  // we'll pass to the assigner function.
115  unsigned i = 0;
116  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
117  for (auto &Arg : CB.args()) {
118  ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
119  i < NumFixedArgs};
120  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
121 
122  // If we have an explicit sret argument that is an Instruction, (i.e., it
123  // might point to function-local memory), we can't meaningfully tail-call.
124  if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
125  CanBeTailCalled = false;
126 
127  Info.OrigArgs.push_back(OrigArg);
128  ++i;
129  }
130 
131  // Try looking through a bitcast from one function type to another.
132  // Commonly happens with calls to objc_msgSend().
133  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
134  if (const Function *F = dyn_cast<Function>(CalleeV))
135  Info.Callee = MachineOperand::CreateGA(F, 0);
136  else
137  Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
138 
139  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};
140  if (!Info.OrigRet.Ty->isVoidTy())
142 
143  Info.CB = &CB;
144  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
145  Info.CallConv = CallConv;
146  Info.SwiftErrorVReg = SwiftErrorVReg;
147  Info.IsMustTailCall = CB.isMustTailCall();
148  Info.IsTailCall = CanBeTailCalled;
149  Info.IsVarArg = IsVarArg;
150  return lowerCall(MIRBuilder, Info);
151 }
152 
153 template <typename FuncInfoTy>
155  const DataLayout &DL,
156  const FuncInfoTy &FuncInfo) const {
157  auto &Flags = Arg.Flags[0];
158  const AttributeList &Attrs = FuncInfo.getAttributes();
159  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
160 
161  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
162  if (PtrTy) {
163  Flags.setPointer();
165  }
166 
167  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
168  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
170  unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;
171 
172  Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
173  if (!ElementTy)
174  ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
175  if (!ElementTy)
176  ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
177  assert(ElementTy && "Must have byval, inalloca or preallocated type");
178  Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
179 
180  // For ByVal, alignment should be passed from FE. BE will guess if
181  // this info is not there but there are cases it cannot get right.
182  if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
183  MemAlign = *ParamAlign;
184  else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
185  MemAlign = *ParamAlign;
186  else
187  MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
188  } else if (OpIdx >= AttributeList::FirstArgIndex) {
189  if (auto ParamAlign =
190  FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
191  MemAlign = *ParamAlign;
192  }
193  Flags.setMemAlign(MemAlign);
194  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
195 
196  // Don't try to use the returned attribute if the argument is marked as
197  // swiftself, since it won't be passed in x0.
198  if (Flags.isSwiftSelf())
199  Flags.setReturned(false);
200 }
201 
// Explicit instantiations of the setArgFlags template for the two "function
// info" providers used during lowering: the IR Function (formal arguments)
// and the CallBase (call-site arguments).
template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;
211 
213  SmallVectorImpl<ArgInfo> &SplitArgs,
214  const DataLayout &DL,
215  CallingConv::ID CallConv,
217  LLVMContext &Ctx = OrigArg.Ty->getContext();
218 
219  SmallVector<EVT, 4> SplitVTs;
220  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);
221 
222  if (SplitVTs.size() == 0)
223  return;
224 
225  if (SplitVTs.size() == 1) {
226  // No splitting to do, but we want to replace the original type (e.g. [1 x
227  // double] -> double).
228  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
229  OrigArg.OrigArgIndex, OrigArg.Flags[0],
230  OrigArg.IsFixed, OrigArg.OrigValue);
231  return;
232  }
233 
234  // Create one ArgInfo for each virtual register in the original ArgInfo.
235  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
236 
237  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
238  OrigArg.Ty, CallConv, false, DL);
239  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
240  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
241  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
242  OrigArg.Flags[0], OrigArg.IsFixed);
243  if (NeedsRegBlock)
244  SplitArgs.back().Flags[0].setInConsecutiveRegs();
245  }
246 
247  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
248 }
249 
250 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
251 static MachineInstrBuilder
253  ArrayRef<Register> SrcRegs) {
254  MachineRegisterInfo &MRI = *B.getMRI();
255  LLT LLTy = MRI.getType(DstRegs[0]);
256  LLT PartLLT = MRI.getType(SrcRegs[0]);
257 
258  // Deal with v3s16 split into v2s16
259  LLT LCMTy = getLCMType(LLTy, PartLLT);
260  if (LCMTy == LLTy) {
261  // Common case where no padding is needed.
262  assert(DstRegs.size() == 1);
263  return B.buildConcatVectors(DstRegs[0], SrcRegs);
264  }
265 
266  // We need to create an unmerge to the result registers, which may require
267  // widening the original value.
268  Register UnmergeSrcReg;
269  if (LCMTy != PartLLT) {
270  // e.g. A <3 x s16> value was split to <2 x s16>
271  // %register_value0:_(<2 x s16>)
272  // %register_value1:_(<2 x s16>)
273  // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
274  // %concat:_<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
275  // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
276  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
277  Register Undef = B.buildUndef(PartLLT).getReg(0);
278 
279  // Build vector of undefs.
280  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);
281 
282  // Replace the first sources with the real registers.
283  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
284  UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
285  } else {
286  // We don't need to widen anything if we're extracting a scalar which was
287  // promoted to a vector e.g. s8 -> v4s8 -> s8
288  assert(SrcRegs.size() == 1);
289  UnmergeSrcReg = SrcRegs[0];
290  }
291 
292  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
293 
294  SmallVector<Register, 8> PadDstRegs(NumDst);
295  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
296 
297  // Create the excess dead defs for the unmerge.
298  for (int I = DstRegs.size(); I != NumDst; ++I)
299  PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
300 
301  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
302 }
303 
304 /// Create a sequence of instructions to combine pieces split into register
305 /// typed values to the original IR value. \p OrigRegs contains the destination
306 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
307 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
309  ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
310  const ISD::ArgFlagsTy Flags) {
311  MachineRegisterInfo &MRI = *B.getMRI();
312 
313  if (PartLLT == LLTy) {
314  // We should have avoided introducing a new virtual register, and just
315  // directly assigned here.
316  assert(OrigRegs[0] == Regs[0]);
317  return;
318  }
319 
320  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
321  Regs.size() == 1) {
322  B.buildBitcast(OrigRegs[0], Regs[0]);
323  return;
324  }
325 
326  // A vector PartLLT needs extending to LLTy's element size.
327  // E.g. <2 x s64> = G_SEXT <2 x s32>.
328  if (PartLLT.isVector() == LLTy.isVector() &&
329  PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
330  (!PartLLT.isVector() ||
331  PartLLT.getNumElements() == LLTy.getNumElements()) &&
332  OrigRegs.size() == 1 && Regs.size() == 1) {
333  Register SrcReg = Regs[0];
334 
335  LLT LocTy = MRI.getType(SrcReg);
336 
337  if (Flags.isSExt()) {
338  SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
339  .getReg(0);
340  } else if (Flags.isZExt()) {
341  SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
342  .getReg(0);
343  }
344 
345  // Sometimes pointers are passed zero extended.
346  LLT OrigTy = MRI.getType(OrigRegs[0]);
347  if (OrigTy.isPointer()) {
348  LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
349  B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
350  return;
351  }
352 
353  B.buildTrunc(OrigRegs[0], SrcReg);
354  return;
355  }
356 
357  if (!LLTy.isVector() && !PartLLT.isVector()) {
358  assert(OrigRegs.size() == 1);
359  LLT OrigTy = MRI.getType(OrigRegs[0]);
360 
361  unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
362  if (SrcSize == OrigTy.getSizeInBits())
363  B.buildMerge(OrigRegs[0], Regs);
364  else {
365  auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
366  B.buildTrunc(OrigRegs[0], Widened);
367  }
368 
369  return;
370  }
371 
372  if (PartLLT.isVector()) {
373  assert(OrigRegs.size() == 1);
374  SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
375 
376  // If PartLLT is a mismatched vector in both number of elements and element
377  // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
378  // have the same elt type, i.e. v4s32.
379  if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
380  PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
381  Regs.size() == 1) {
382  LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
383  .changeElementCount(PartLLT.getElementCount() * 2);
384  CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
385  PartLLT = NewTy;
386  }
387 
388  if (LLTy.getScalarType() == PartLLT.getElementType()) {
389  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
390  } else {
391  unsigned I = 0;
392  LLT GCDTy = getGCDType(LLTy, PartLLT);
393 
394  // We are both splitting a vector, and bitcasting its element types. Cast
395  // the source pieces into the appropriate number of pieces with the result
396  // element type.
397  for (Register SrcReg : CastRegs)
398  CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
399  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
400  }
401 
402  return;
403  }
404 
405  assert(LLTy.isVector() && !PartLLT.isVector());
406 
407  LLT DstEltTy = LLTy.getElementType();
408 
409  // Pointer information was discarded. We'll need to coerce some register types
410  // to avoid violating type constraints.
411  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
412 
413  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
414 
415  if (DstEltTy == PartLLT) {
416  // Vector was trivially scalarized.
417 
418  if (RealDstEltTy.isPointer()) {
419  for (Register Reg : Regs)
420  MRI.setType(Reg, RealDstEltTy);
421  }
422 
423  B.buildBuildVector(OrigRegs[0], Regs);
424  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
425  // Deal with vector with 64-bit elements decomposed to 32-bit
426  // registers. Need to create intermediate 64-bit elements.
427  SmallVector<Register, 8> EltMerges;
428  int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
429 
430  assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
431 
432  for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
433  auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
434  // Fix the type in case this is really a vector of pointers.
435  MRI.setType(Merge.getReg(0), RealDstEltTy);
436  EltMerges.push_back(Merge.getReg(0));
437  Regs = Regs.drop_front(PartsPerElt);
438  }
439 
440  B.buildBuildVector(OrigRegs[0], EltMerges);
441  } else {
442  // Vector was split, and elements promoted to a wider type.
443  // FIXME: Should handle floating point promotions.
444  LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
445  auto BV = B.buildBuildVector(BVType, Regs);
446  B.buildTrunc(OrigRegs[0], BV);
447  }
448 }
449 
450 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
451 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
452 /// contain the type of scalar value extension if necessary.
453 ///
454 /// This is used for outgoing values (vregs to physregs)
456  Register SrcReg, LLT SrcTy, LLT PartTy,
457  unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
458  // We could just insert a regular copy, but this is unreachable at the moment.
459  assert(SrcTy != PartTy && "identical part types shouldn't reach here");
460 
461  const unsigned PartSize = PartTy.getSizeInBits();
462 
463  if (PartTy.isVector() == SrcTy.isVector() &&
464  PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
465  assert(DstRegs.size() == 1);
466  B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
467  return;
468  }
469 
470  if (SrcTy.isVector() && !PartTy.isVector() &&
471  PartSize > SrcTy.getElementType().getSizeInBits()) {
472  // Vector was scalarized, and the elements extended.
473  auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
474  for (int i = 0, e = DstRegs.size(); i != e; ++i)
475  B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
476  return;
477  }
478 
479  LLT GCDTy = getGCDType(SrcTy, PartTy);
480  if (GCDTy == PartTy) {
481  // If this already evenly divisible, we can create a simple unmerge.
482  B.buildUnmerge(DstRegs, SrcReg);
483  return;
484  }
485 
486  MachineRegisterInfo &MRI = *B.getMRI();
487  LLT DstTy = MRI.getType(DstRegs[0]);
488  LLT LCMTy = getLCMType(SrcTy, PartTy);
489 
490  const unsigned DstSize = DstTy.getSizeInBits();
491  const unsigned SrcSize = SrcTy.getSizeInBits();
492  unsigned CoveringSize = LCMTy.getSizeInBits();
493 
494  Register UnmergeSrc = SrcReg;
495 
496  if (CoveringSize != SrcSize) {
497  // For scalars, it's common to be able to use a simple extension.
498  if (SrcTy.isScalar() && DstTy.isScalar()) {
499  CoveringSize = alignTo(SrcSize, DstSize);
500  LLT CoverTy = LLT::scalar(CoveringSize);
501  UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
502  } else {
503  // Widen to the common type.
504  // FIXME: This should respect the extend type
505  Register Undef = B.buildUndef(SrcTy).getReg(0);
506  SmallVector<Register, 8> MergeParts(1, SrcReg);
507  for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
508  MergeParts.push_back(Undef);
509  UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
510  }
511  }
512 
513  // Unmerge to the original registers and pad with dead defs.
514  SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
515  for (unsigned Size = DstSize * DstRegs.size(); Size != CoveringSize;
516  Size += DstSize) {
517  UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
518  }
519 
520  B.buildUnmerge(UnmergeResults, UnmergeSrc);
521 }
522 
524  ValueHandler &Handler, ValueAssigner &Assigner,
526  CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
527  MachineFunction &MF = MIRBuilder.getMF();
528  const Function &F = MF.getFunction();
530 
531  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
532  if (!determineAssignments(Assigner, Args, CCInfo))
533  return false;
534 
535  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
536  ThisReturnReg);
537 }
538 
539 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
540  if (Flags.isSExt())
541  return TargetOpcode::G_SEXT;
542  if (Flags.isZExt())
543  return TargetOpcode::G_ZEXT;
544  return TargetOpcode::G_ANYEXT;
545 }
546 
549  CCState &CCInfo) const {
550  LLVMContext &Ctx = CCInfo.getContext();
551  const CallingConv::ID CallConv = CCInfo.getCallingConv();
552 
553  unsigned NumArgs = Args.size();
554  for (unsigned i = 0; i != NumArgs; ++i) {
555  EVT CurVT = EVT::getEVT(Args[i].Ty);
556 
557  MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
558 
559  // If we need to split the type over multiple regs, check it's a scenario
560  // we currently support.
561  unsigned NumParts =
562  TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
563 
564  if (NumParts == 1) {
565  // Try to use the register type if we couldn't assign the VT.
566  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
567  Args[i].Flags[0], CCInfo))
568  return false;
569  continue;
570  }
571 
572  // For incoming arguments (physregs to vregs), we could have values in
573  // physregs (or memlocs) which we want to extract and copy to vregs.
574  // During this, we might have to deal with the LLT being split across
575  // multiple regs, so we have to record this information for later.
576  //
577  // If we have outgoing args, then we have the opposite case. We have a
578  // vreg with an LLT which we want to assign to a physical location, and
579  // we might have to record that the value has to be split later.
580 
581  // We're handling an incoming arg which is split over multiple regs.
582  // E.g. passing an s128 on AArch64.
583  ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
584  Args[i].Flags.clear();
585 
586  for (unsigned Part = 0; Part < NumParts; ++Part) {
587  ISD::ArgFlagsTy Flags = OrigFlags;
588  if (Part == 0) {
589  Flags.setSplit();
590  } else {
591  Flags.setOrigAlign(Align(1));
592  if (Part == NumParts - 1)
593  Flags.setSplitEnd();
594  }
595 
596  Args[i].Flags.push_back(Flags);
597  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
598  Args[i].Flags[Part], CCInfo)) {
599  // Still couldn't assign this smaller part type for some reason.
600  return false;
601  }
602  }
603  }
604 
605  return true;
606 }
607 
610  CCState &CCInfo,
612  MachineIRBuilder &MIRBuilder,
613  Register ThisReturnReg) const {
614  MachineFunction &MF = MIRBuilder.getMF();
616  const Function &F = MF.getFunction();
617  const DataLayout &DL = F.getParent()->getDataLayout();
618 
619  const unsigned NumArgs = Args.size();
620 
621  // Stores thunks for outgoing register assignments. This is used so we delay
622  // generating register copies until mem loc assignments are done. We do this
623  // so that if the target is using the delayed stack protector feature, we can
624  // find the split point of the block accurately. E.g. if we have:
625  // G_STORE %val, %memloc
626  // $x0 = COPY %foo
627  // $x1 = COPY %bar
628  // CALL func
629  // ... then the split point for the block will correctly be at, and including,
630  // the copy to $x0. If instead the G_STORE instruction immediately precedes
631  // the CALL, then we'd prematurely choose the CALL as the split point, thus
632  // generating a split block with a CALL that uses undefined physregs.
633  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;
634 
635  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
636  assert(j < ArgLocs.size() && "Skipped too many arg locs");
637  CCValAssign &VA = ArgLocs[j];
638  assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
639 
640  if (VA.needsCustom()) {
641  std::function<void()> Thunk;
642  unsigned NumArgRegs = Handler.assignCustomValue(
643  Args[i], makeArrayRef(ArgLocs).slice(j), &Thunk);
644  if (Thunk)
645  DelayedOutgoingRegAssignments.emplace_back(Thunk);
646  if (!NumArgRegs)
647  return false;
648  j += NumArgRegs;
649  continue;
650  }
651 
652  const MVT ValVT = VA.getValVT();
653  const MVT LocVT = VA.getLocVT();
654 
655  const LLT LocTy(LocVT);
656  const LLT ValTy(ValVT);
657  const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
658  const EVT OrigVT = EVT::getEVT(Args[i].Ty);
659  const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
660 
661  // Expected to be multiple regs for a single incoming arg.
662  // There should be Regs.size() ArgLocs per argument.
663  // This should be the same as getNumRegistersForCallingConv
664  const unsigned NumParts = Args[i].Flags.size();
665 
666  // Now split the registers into the assigned types.
667  Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
668 
669  if (NumParts != 1 || NewLLT != OrigTy) {
670  // If we can't directly assign the register, we need one or more
671  // intermediate values.
672  Args[i].Regs.resize(NumParts);
673 
674  // For each split register, create and assign a vreg that will store
675  // the incoming component of the larger value. These will later be
676  // merged to form the final vreg.
677  for (unsigned Part = 0; Part < NumParts; ++Part)
678  Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
679  }
680 
681  assert((j + (NumParts - 1)) < ArgLocs.size() &&
682  "Too many regs for number of args");
683 
684  // Coerce into outgoing value types before register assignment.
685  if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
686  assert(Args[i].OrigRegs.size() == 1);
687  buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
688  ValTy, extendOpFromFlags(Args[i].Flags[0]));
689  }
690 
691  for (unsigned Part = 0; Part < NumParts; ++Part) {
692  Register ArgReg = Args[i].Regs[Part];
693  // There should be Regs.size() ArgLocs per argument.
694  VA = ArgLocs[j + Part];
695  const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
696 
697  if (VA.isMemLoc() && !Flags.isByVal()) {
698  // Individual pieces may have been spilled to the stack and others
699  // passed in registers.
700 
701  // TODO: The memory size may be larger than the value we need to
702  // store. We may need to adjust the offset for big endian targets.
703  LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);
704 
705  MachinePointerInfo MPO;
706  Register StackAddr = Handler.getStackAddress(
707  MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);
708 
709  Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
710  continue;
711  }
712 
713  if (VA.isMemLoc() && Flags.isByVal()) {
714  assert(Args[i].Regs.size() == 1 &&
715  "didn't expect split byval pointer");
716 
717  if (Handler.isIncomingArgumentHandler()) {
718  // We just need to copy the frame index value to the pointer.
719  MachinePointerInfo MPO;
720  Register StackAddr = Handler.getStackAddress(
721  Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
722  MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
723  } else {
724  // For outgoing byval arguments, insert the implicit copy byval
725  // implies, such that writes in the callee do not modify the caller's
726  // value.
727  uint64_t MemSize = Flags.getByValSize();
728  int64_t Offset = VA.getLocMemOffset();
729 
730  MachinePointerInfo DstMPO;
731  Register StackAddr =
732  Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
733 
734  MachinePointerInfo SrcMPO(Args[i].OrigValue);
735  if (!Args[i].OrigValue) {
736  // We still need to accurately track the stack address space if we
737  // don't know the underlying value.
738  const LLT PtrTy = MRI.getType(StackAddr);
739  SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
740  }
741 
742  Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
743  inferAlignFromPtrInfo(MF, DstMPO));
744 
745  Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
746  inferAlignFromPtrInfo(MF, SrcMPO));
747 
748  Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
749  DstMPO, DstAlign, SrcMPO, SrcAlign,
750  MemSize, VA);
751  }
752  continue;
753  }
754 
755  assert(!VA.needsCustom() && "custom loc should have been handled already");
756 
757  if (i == 0 && ThisReturnReg.isValid() &&
758  Handler.isIncomingArgumentHandler() &&
760  Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
761  continue;
762  }
763 
764  if (Handler.isIncomingArgumentHandler())
765  Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
766  else {
767  DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
768  Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
769  });
770  }
771  }
772 
773  // Now that all pieces have been assigned, re-pack the register typed values
774  // into the original value typed registers.
775  if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
776  // Merge the split registers into the expected larger result vregs of
777  // the original call.
778  buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
779  LocTy, Args[i].Flags[0]);
780  }
781 
782  j += NumParts - 1;
783  }
784  for (auto &Fn : DelayedOutgoingRegAssignments)
785  Fn();
786 
787  return true;
788 }
789 
791  ArrayRef<Register> VRegs, Register DemoteReg,
792  int FI) const {
793  MachineFunction &MF = MIRBuilder.getMF();
795  const DataLayout &DL = MF.getDataLayout();
796 
797  SmallVector<EVT, 4> SplitVTs;
799  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
800 
801  assert(VRegs.size() == SplitVTs.size());
802 
803  unsigned NumValues = SplitVTs.size();
804  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
805  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
806  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
807 
809 
810  for (unsigned I = 0; I < NumValues; ++I) {
811  Register Addr;
812  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
813  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
814  MRI.getType(VRegs[I]),
815  commonAlignment(BaseAlign, Offsets[I]));
816  MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
817  }
818 }
819 
821  ArrayRef<Register> VRegs,
822  Register DemoteReg) const {
823  MachineFunction &MF = MIRBuilder.getMF();
825  const DataLayout &DL = MF.getDataLayout();
826 
827  SmallVector<EVT, 4> SplitVTs;
829  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
830 
831  assert(VRegs.size() == SplitVTs.size());
832 
833  unsigned NumValues = SplitVTs.size();
834  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
835  unsigned AS = DL.getAllocaAddrSpace();
836  LLT OffsetLLTy =
837  getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
838 
839  MachinePointerInfo PtrInfo(AS);
840 
841  for (unsigned I = 0; I < NumValues; ++I) {
842  Register Addr;
843  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
844  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
845  MRI.getType(VRegs[I]),
846  commonAlignment(BaseAlign, Offsets[I]));
847  MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
848  }
849 }
850 
852  const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
853  MachineRegisterInfo &MRI, const DataLayout &DL) const {
854  unsigned AS = DL.getAllocaAddrSpace();
855  DemoteReg = MRI.createGenericVirtualRegister(
856  LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
857 
858  Type *PtrTy = PointerType::get(F.getReturnType(), AS);
859 
860  SmallVector<EVT, 1> ValueVTs;
861  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
862 
863  // NOTE: Assume that a pointer won't get split into more than one VT.
864  assert(ValueVTs.size() == 1);
865 
866  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
869  DemoteArg.Flags[0].setSRet();
870  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
871 }
872 
874  const CallBase &CB,
875  CallLoweringInfo &Info) const {
876  const DataLayout &DL = MIRBuilder.getDataLayout();
877  Type *RetTy = CB.getType();
878  unsigned AS = DL.getAllocaAddrSpace();
879  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
880 
881  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
882  DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
883 
884  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
885  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
887  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
888  DemoteArg.Flags[0].setSRet();
889 
890  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
891  Info.DemoteStackIndex = FI;
892  Info.DemoteRegister = DemoteReg;
893 }
894 
897  CCAssignFn *Fn) const {
898  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
899  MVT VT = MVT::getVT(Outs[I].Ty);
900  if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
901  return false;
902  }
903  return true;
904 }
905 
909  const DataLayout &DL) const {
910  LLVMContext &Context = RetTy->getContext();
912 
913  SmallVector<EVT, 4> SplitVTs;
914  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
916 
917  for (EVT VT : SplitVTs) {
918  unsigned NumParts =
919  TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
920  MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
921  Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
922 
923  for (unsigned I = 0; I < NumParts; ++I) {
924  Outs.emplace_back(PartTy, Flags);
925  }
926  }
927 }
928 
930  const auto &F = MF.getFunction();
931  Type *ReturnType = F.getReturnType();
932  CallingConv::ID CallConv = F.getCallingConv();
933 
934  SmallVector<BaseArgInfo, 4> SplitArgs;
935  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
936  MF.getDataLayout());
937  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
938 }
939 
941  const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
942  const SmallVectorImpl<CCValAssign> &OutLocs,
943  const SmallVectorImpl<ArgInfo> &OutArgs) const {
944  for (unsigned i = 0; i < OutLocs.size(); ++i) {
945  auto &ArgLoc = OutLocs[i];
946  // If it's not a register, it's fine.
947  if (!ArgLoc.isRegLoc())
948  continue;
949 
950  MCRegister PhysReg = ArgLoc.getLocReg();
951 
952  // Only look at callee-saved registers.
953  if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
954  continue;
955 
956  LLVM_DEBUG(
957  dbgs()
958  << "... Call has an argument passed in a callee-saved register.\n");
959 
960  // Check if it was copied from.
961  const ArgInfo &OutInfo = OutArgs[i];
962 
963  if (OutInfo.Regs.size() > 1) {
964  LLVM_DEBUG(
965  dbgs() << "... Cannot handle arguments in multiple registers.\n");
966  return false;
967  }
968 
969  // Check if we copy the register, walking through copies from virtual
970  // registers. Note that getDefIgnoringCopies does not ignore copies from
971  // physical registers.
972  MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
973  if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
974  LLVM_DEBUG(
975  dbgs()
976  << "... Parameter was not copied into a VReg, cannot tail call.\n");
977  return false;
978  }
979 
980  // Got a copy. Verify that it's the same as the register we want.
981  Register CopyRHS = RegDef->getOperand(1).getReg();
982  if (CopyRHS != PhysReg) {
983  LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
984  "VReg, cannot tail call.\n");
985  return false;
986  }
987  }
988 
989  return true;
990 }
991 
993  MachineFunction &MF,
994  SmallVectorImpl<ArgInfo> &InArgs,
995  ValueAssigner &CalleeAssigner,
996  ValueAssigner &CallerAssigner) const {
997  const Function &F = MF.getFunction();
998  CallingConv::ID CalleeCC = Info.CallConv;
999  CallingConv::ID CallerCC = F.getCallingConv();
1000 
1001  if (CallerCC == CalleeCC)
1002  return true;
1003 
1005  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
1006  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
1007  return false;
1008 
1010  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
1011  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
1012  return false;
1013 
1014  // We need the argument locations to match up exactly. If there's more in
1015  // one than the other, then we are done.
1016  if (ArgLocs1.size() != ArgLocs2.size())
1017  return false;
1018 
1019  // Make sure that each location is passed in exactly the same way.
1020  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
1021  const CCValAssign &Loc1 = ArgLocs1[i];
1022  const CCValAssign &Loc2 = ArgLocs2[i];
1023 
1024  // We need both of them to be the same. So if one is a register and one
1025  // isn't, we're done.
1026  if (Loc1.isRegLoc() != Loc2.isRegLoc())
1027  return false;
1028 
1029  if (Loc1.isRegLoc()) {
1030  // If they don't have the same register location, we're done.
1031  if (Loc1.getLocReg() != Loc2.getLocReg())
1032  return false;
1033 
1034  // They matched, so we can move to the next ArgLoc.
1035  continue;
1036  }
1037 
1038  // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1039  if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1040  return false;
1041  }
1042 
1043  return true;
1044 }
1045 
1047  const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
1048  const MVT ValVT = VA.getValVT();
1049  if (ValVT != MVT::iPTR) {
1050  LLT ValTy(ValVT);
1051 
1052  // We lost the pointeriness going through CCValAssign, so try to restore it
1053  // based on the flags.
1054  if (Flags.isPointer()) {
1055  LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1056  ValTy.getScalarSizeInBits());
1057  if (ValVT.isVector())
1058  return LLT::vector(ValTy.getElementCount(), PtrTy);
1059  return PtrTy;
1060  }
1061 
1062  return ValTy;
1063  }
1064 
1065  unsigned AddrSpace = Flags.getPointerAddrSpace();
1066  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1067 }
1068 
1070  const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1071  const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1072  const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1073  CCValAssign &VA) const {
1074  MachineFunction &MF = MIRBuilder.getMF();
1076  SrcPtrInfo,
1078  SrcAlign);
1079 
1081  DstPtrInfo,
1083  MemSize, DstAlign);
1084 
1085  const LLT PtrTy = MRI.getType(DstPtr);
1086  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1087 
1088  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1089  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1090 }
1091 
1093  CCValAssign &VA,
1094  unsigned MaxSizeBits) {
1095  LLT LocTy{VA.getLocVT()};
1096  LLT ValTy{VA.getValVT()};
1097 
1098  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1099  return ValReg;
1100 
1101  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1102  if (MaxSizeBits <= ValTy.getSizeInBits())
1103  return ValReg;
1104  LocTy = LLT::scalar(MaxSizeBits);
1105  }
1106 
1107  const LLT ValRegTy = MRI.getType(ValReg);
1108  if (ValRegTy.isPointer()) {
1109  // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1110  // we have to cast to do the extension.
1111  LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1112  ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1113  }
1114 
1115  switch (VA.getLocInfo()) {
1116  default: break;
1117  case CCValAssign::Full:
1118  case CCValAssign::BCvt:
1119  // FIXME: bitconverting between vector types may or may not be a
1120  // nop in big-endian situations.
1121  return ValReg;
1122  case CCValAssign::AExt: {
1123  auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1124  return MIB.getReg(0);
1125  }
1126  case CCValAssign::SExt: {
1127  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1128  MIRBuilder.buildSExt(NewReg, ValReg);
1129  return NewReg;
1130  }
1131  case CCValAssign::ZExt: {
1132  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1133  MIRBuilder.buildZExt(NewReg, ValReg);
1134  return NewReg;
1135  }
1136  }
1137  llvm_unreachable("unable to extend register");
1138 }
1139 
1140 void CallLowering::ValueAssigner::anchor() {}
1141 
1143  Register SrcReg,
1144  LLT NarrowTy) {
1145  switch (VA.getLocInfo()) {
1146  case CCValAssign::LocInfo::ZExt: {
1147  return MIRBuilder
1148  .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1149  NarrowTy.getScalarSizeInBits())
1150  .getReg(0);
1151  }
1152  case CCValAssign::LocInfo::SExt: {
1153  return MIRBuilder
1154  .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1155  NarrowTy.getScalarSizeInBits())
1156  .getReg(0);
1157  break;
1158  }
1159  default:
1160  return SrcReg;
1161  }
1162 }
1163 
1164 /// Check if we can use a basic COPY instruction between the two types.
1165 ///
1166 /// We're currently building on top of the infrastructure using MVT, which loses
1167 /// pointer information in the CCValAssign. We accept copies from physical
1168 /// registers that have been reported as integers if it's to an equivalent sized
1169 /// pointer LLT.
1170 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1171  if (SrcTy == DstTy)
1172  return true;
1173 
1174  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1175  return false;
1176 
1177  SrcTy = SrcTy.getScalarType();
1178  DstTy = DstTy.getScalarType();
1179 
1180  return (SrcTy.isPointer() && DstTy.isScalar()) ||
1181  (DstTy.isScalar() && SrcTy.isPointer());
1182 }
1183 
1185  Register PhysReg,
1186  CCValAssign VA) {
1187  const MVT LocVT = VA.getLocVT();
1188  const LLT LocTy(LocVT);
1189  const LLT RegTy = MRI.getType(ValVReg);
1190 
1191  if (isCopyCompatibleType(RegTy, LocTy)) {
1192  MIRBuilder.buildCopy(ValVReg, PhysReg);
1193  return;
1194  }
1195 
1196  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1197  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1198  MIRBuilder.buildTrunc(ValVReg, Hint);
1199 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:153
i
i
Definition: README.txt:29
llvm::ISD::ArgFlagsTy::isInAlloca
bool isInAlloca() const
Definition: TargetCallingConv.h:91
llvm::CallLowering::ValueAssigner
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:157
llvm::alignTo
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:148
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:38
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::getDefIgnoringCopies
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:453
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:69
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AllocatorList.h:23
Reg
unsigned Reg
Definition: MachineSink.cpp:1558
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:36
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:791
llvm::ISD::ArgFlagsTy::setSwiftSelf
void setSwiftSelf()
Definition: TargetCallingConv.h:98
llvm::LLT::getScalarSizeInBits
unsigned getScalarSizeInBits() const
Definition: LowLevelTypeImpl.h:213
llvm::ISD::ArgFlagsTy::setNest
void setNest()
Definition: TargetCallingConv.h:119
llvm::CallLowering::handleAssignments
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, Register ThisReturnReg=Register()) const
Use Handler to insert code to handle the argument/return values represented by Args.
Definition: CallLowering.cpp:608
CallLowering.h
llvm::TargetLowering::functionArgumentNeedsConsecutiveRegisters
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Definition: TargetLowering.h:4106
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:191
buildCopyToRegs
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, Register SrcReg, LLT SrcTy, LLT PartTy, unsigned ExtendOp=TargetOpcode::G_ANYEXT)
Create a sequence of instructions to expand the value in SrcReg (of type SrcTy) to the types in DstRe...
Definition: CallLowering.cpp:455
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:425
llvm::Function
Definition: Function.h:62
llvm::CallLowering::IncomingValueHandler::assignValueToReg
void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign VA) override
Provides a default implementation for argument handling.
Definition: CallLowering.cpp:1184
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:729
llvm::ISD::ArgFlagsTy::setMemAlign
void setMemAlign(Align A)
Definition: TargetCallingConv.h:148
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::MVT::isVector
bool isVector() const
Return true if this is a vector value type.
Definition: MachineValueType.h:366
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:431
llvm::CallLowering::ValueHandler::extendRegister
Register extendRegister(Register ValReg, CCValAssign &VA, unsigned MaxSizeBits=0)
Extend a register to the location type given in VA, capped at extending to at most MaxSize bits.
Definition: CallLowering.cpp:1092
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:734
llvm::CallLowering::ValueHandler
Definition: CallLowering.h:225
llvm::CallLowering::insertSRetOutgoingArgument
void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, const CallBase &CB, CallLoweringInfo &Info) const
For the call-base described by CB, insert the hidden sret ArgInfo to the OrigArgs field of Info.
Definition: CallLowering.cpp:873
llvm::ISD::ArgFlagsTy::setPointer
void setPointer()
Definition: TargetCallingConv.h:142
llvm::LLT::getScalarType
LLT getScalarType() const
Definition: LowLevelTypeImpl.h:168
llvm::ISD::ArgFlagsTy::isZExt
bool isZExt() const
Definition: TargetCallingConv.h:73
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
Module.h
llvm::AttributeList
Definition: Attributes.h:398
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1473
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:128
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1256
llvm::MachineMemOperand::MODereferenceable
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
Definition: MachineMemOperand.h:143
extendOpFromFlags
static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)
Definition: CallLowering.cpp:539
llvm::CallBase::isMustTailCall
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Definition: Instructions.cpp:298
llvm::CallLowering::splitToValueTypes
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
Definition: CallLowering.cpp:212
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:46
llvm::LLT::changeElementCount
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
Definition: LowLevelTypeImpl.h:190
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::LLT::vector
static LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:57
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:139
MachineIRBuilder.h
buildCopyFromRegs
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef< Register > OrigRegs, ArrayRef< Register > Regs, LLT LLTy, LLT PartLLT, const ISD::ArgFlagsTy Flags)
Create a sequence of instructions to combine pieces split into register typed values to the original ...
Definition: CallLowering.cpp:308
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::MachineIRBuilder::buildConstant
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Definition: MachineIRBuilder.cpp:255
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::FunctionType::isVarArg
bool isVarArg() const
Definition: DerivedTypes.h:123
llvm::ISD::ArgFlagsTy::isSwiftSelf
bool isSwiftSelf() const
Definition: TargetCallingConv.h:97
MachineRegisterInfo.h
Context
ManagedStatic< detail::RecordContext > Context
Definition: Record.cpp:96
llvm::ComputeValueVTs
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:124
llvm::getLLTForType
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
Definition: LowLevelType.cpp:21
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::LLT::fixed_vector
static LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:75
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
TargetLowering.h
llvm::getGCDType
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:926
llvm::CallLowering::getTLI
const TargetLowering * getTLI() const
Getter for generic TargetLowering class.
Definition: CallLowering.h:336
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:644
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:39
llvm::CallLowering::resultsCompatible
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
Definition: CallLowering.cpp:992
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:33
TargetMachine.h
llvm::MachineIRBuilder::buildZExt
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
Definition: MachineIRBuilder.cpp:424
llvm::MachineIRBuilder::buildLoad
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
Definition: MachineIRBuilder.h:832
llvm::ISD::ArgFlagsTy::setReturned
void setReturned(bool V=true)
Definition: TargetCallingConv.h:122
llvm::MachineRegisterInfo::setType
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Definition: MachineRegisterInfo.cpp:182
llvm::CallLowering::checkReturnTypeForCallConv
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
Definition: CallLowering.cpp:929
llvm::ISD::ArgFlagsTy::isByVal
bool isByVal() const
Definition: TargetCallingConv.h:85
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:150
llvm::CallLowering::ValueHandler::assignValueToReg
virtual void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign VA)=0
The specified value has been assigned to a physical register, handle the appropriate COPY (either to ...
llvm::CallLowering::ArgInfo
Definition: CallLowering.h:61
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:499
llvm::MachineIRBuilder::buildPtrToInt
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
Definition: MachineIRBuilder.h:623
mergeVectorRegsToResultRegs
static MachineInstrBuilder mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, ArrayRef< Register > SrcRegs)
Pack values SrcRegs to cover the vector type result DstRegs.
Definition: CallLowering.cpp:252
llvm::LLT::getSizeInBits
TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelTypeImpl.h:153
llvm::CallLowering::ValueHandler::getStackValueStoreType
virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const
Return the in-memory size to write for the argument at VA.
Definition: CallLowering.cpp:1046
Utils.h
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineIRBuilder::getDataLayout
const DataLayout & getDataLayout() const
Definition: MachineIRBuilder.h:272
llvm::Function::getFnAttribute
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.cpp:652
llvm::CallLowering::determineAssignments
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
Definition: CallLowering.cpp:547
llvm::CallLowering::ValueHandler::assignCustomValue
virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef< CCValAssign > VAs, std::function< void()> *Thunk=nullptr)
Handle custom values, which may be passed into one or more of VAs.
Definition: CallLowering.h:291
llvm::MachineIRBuilder::getMF
MachineFunction & getMF()
Getter for the function we currently build.
Definition: MachineIRBuilder.h:262
llvm::CCState::getContext
LLVMContext & getContext() const
Definition: CallingConvLower.h:257
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::ISD::ArgFlagsTy::setInReg
void setInReg()
Definition: TargetCallingConv.h:80
llvm::CCValAssign::getLocInfo
LocInfo getLocInfo() const
Definition: CallingConvLower.h:155
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::CCValAssign::getLocMemOffset
unsigned getLocMemOffset() const
Definition: CallingConvLower.h:151
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1454
llvm::Attribute::getValueAsString
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:301
llvm::EVT::getTypeForEVT
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:181
llvm::CCValAssign::isRegLoc
bool isRegLoc() const
Definition: CallingConvLower.h:145
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::MachineInstrBuilder::getReg
Register getReg(unsigned Idx) const
Get the register for the operand index.
Definition: MachineInstrBuilder.h:94
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::ISD::ArgFlagsTy::setSExt
void setSExt()
Definition: TargetCallingConv.h:77
llvm::Function::getAttributes
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:316
llvm::CCAssignFn
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
Definition: CallingConvLower.h:177
llvm::LLT::pointer
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelTypeImpl.h:50
llvm::LLT::getAddressSpace
unsigned getAddressSpace() const
Definition: LowLevelTypeImpl.h:227
llvm::getLCMType
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:880
llvm::CallLowering::isTypeIsValidForThisReturn
virtual bool isTypeIsValidForThisReturn(EVT Ty) const
For targets which support the "returned" parameter attribute, returns true if the given type is a val...
Definition: CallLowering.h:586
llvm::CallLowering::ArgInfo::NoArgIndex
static const unsigned NoArgIndex
Sentinel value for implicit machine-level input arguments.
Definition: CallLowering.h:78
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:401
llvm::ISD::ArgFlagsTy::setSplit
void setSplit()
Definition: TargetCallingConv.h:133
llvm::MachineOperand::clobbersPhysReg
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
Definition: MachineOperand.h:617
llvm::TargetLoweringBase::getNumRegistersForCallingConv
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
Definition: TargetLowering.h:1547
llvm::CallBase::isTailCall
bool isTailCall() const
Tests if this call site is marked as a tail call.
Definition: Instructions.cpp:305
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:202
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:212
llvm::CCValAssign::SExt
@ SExt
Definition: CallingConvLower.h:37
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
llvm::MachineIRBuilder::buildAssertSExt
MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_SEXT Op, Size.
Definition: MachineIRBuilder.cpp:243
uint64_t
llvm::CallLowering::checkReturn
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
Definition: CallLowering.cpp:895
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
llvm::MachineIRBuilder::buildAssertZExt
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
Definition: MachineIRBuilder.cpp:249
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:38
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::inferAlignFromPtrInfo
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:686
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:558
I
#define I(x, y, z)
Definition: MD5.cpp:59
Analysis.h
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:70
llvm::LLT::isVector
bool isVector() const
Definition: LowLevelTypeImpl.h:123
llvm::LLT::getNumElements
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelTypeImpl.h:127
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:632
llvm::ISD::ArgFlagsTy::setOrigAlign
void setOrigAlign(Align A)
Definition: TargetCallingConv.h:164
llvm::CCState::getCallingConv
CallingConv::ID getCallingConv() const
Definition: CallingConvLower.h:259
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::LLT::isPointer
bool isPointer() const
Definition: LowLevelTypeImpl.h:121
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:650
llvm::ISD::ArgFlagsTy::setSwiftError
void setSwiftError()
Definition: TargetCallingConv.h:104
llvm::ISD::ArgFlagsTy::getPointerAddrSpace
unsigned getPointerAddrSpace() const
Definition: TargetCallingConv.h:187
isCopyCompatibleType
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy)
Check if we can use a basic COPY instruction between the two types.
Definition: CallLowering.cpp:1170
function
print Print MemDeps of function
Definition: MemDepPrinter.cpp:83
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:188
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:31
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::ISD::ArgFlagsTy::setByVal
void setByVal()
Definition: TargetCallingConv.h:86
llvm::LLT::isScalar
bool isScalar() const
Definition: LowLevelTypeImpl.h:119
llvm::ARM::WinEH::ReturnType
ReturnType
Definition: ARMWinEH.h:25
llvm::CallLowering::addArgFlagsFromAttributes
void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, const AttributeList &Attrs, unsigned OpIdx) const
Adds flags to Flags based off of the attributes in Attrs.
Definition: CallLowering.cpp:73
llvm::MachineFunction
Definition: MachineFunction.h:234
addFlagsUsingAttrFn
static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, const std::function< bool(Attribute::AttrKind)> &AttrFn)
Helper function which updates Flags when AttrFn returns true.
Definition: CallLowering.cpp:36
llvm::ISD::ArgFlagsTy::setByValSize
void setByValSize(unsigned S)
Definition: TargetCallingConv.h:173
llvm::CCValAssign::getValNo
unsigned getValNo() const
Definition: CallingConvLower.h:142
llvm::CallLowering::ValueHandler::copyArgumentMemory
void copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr, const MachinePointerInfo &DstPtrInfo, Align DstAlign, const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, CCValAssign &VA) const
Do a memory copy of MemSize bytes from SrcPtr to DstPtr.
Definition: CallLowering.cpp:1069
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineOperand::CreateGA
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Definition: MachineOperand.h:850
DataLayout.h
llvm::MachineFrameInfo::CreateStackObject
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Definition: MachineFrameInfo.cpp:51
llvm::CallLowering::ValueHandler::getStackAddress
virtual Register getStackAddress(uint64_t MemSize, int64_t Offset, MachinePointerInfo &MPO, ISD::ArgFlagsTy Flags)=0
Materialize a VReg containing the address of the specified stack-based object.
llvm::ISD::ArgFlagsTy::setInAlloca
void setInAlloca()
Definition: TargetCallingConv.h:92
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:489
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::CallLowering::insertSRetLoads
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
Definition: CallLowering.cpp:790
llvm::MachineIRBuilder::buildCopy
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
Definition: MachineIRBuilder.cpp:238
llvm::ISD::ArgFlagsTy::setSplitEnd
void setSplitEnd()
Definition: TargetCallingConv.h:136
uint32_t
llvm::ISD::ArgFlagsTy::isPreallocated
bool isPreallocated() const
Definition: TargetCallingConv.h:94
llvm::ISD::ArgFlagsTy
Definition: TargetCallingConv.h:27
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::ISD::ArgFlagsTy::setPointerAddrSpace
void setPointerAddrSpace(unsigned AS)
Definition: TargetCallingConv.h:188
llvm::MVT::iPTR
@ iPTR
Definition: MachineValueType.h:312
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:127
llvm::LLT::changeElementType
LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelTypeImpl.h:174
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:135
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MachineIRBuilder::buildAnyExt
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Definition: MachineIRBuilder.cpp:414
llvm::MachineIRBuilder::materializePtrAdd
Optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
Definition: MachineIRBuilder.cpp:193
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:685
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
NumFixedArgs
static unsigned NumFixedArgs
Definition: LanaiISelLowering.cpp:368
llvm::MachineIRBuilder::buildFrameIndex
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
Definition: MachineIRBuilder.cpp:137
llvm::CallLowering::insertSRetIncomingArgument
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo to the beginning of SplitArgs.
Definition: CallLowering.cpp:851
llvm::CallLowering::getAttributesForArgIdx
ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call, unsigned ArgIdx) const
Definition: CallLowering.cpp:64
j
return j(j<< 16)
llvm::CallLowering::canLowerReturn
virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const
This hook must be implemented to check whether the return values described by Outs can fit into the r...
Definition: CallLowering.h:486
llvm::CCValAssign::isMemLoc
bool isMemLoc() const
Definition: CallingConvLower.h:146
llvm::CallLowering::insertSRetStores
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.
Definition: CallLowering.cpp:820
llvm::commonAlignment
Align commonAlignment(Align A, Align B)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:211
llvm::ISD::ArgFlagsTy::getByValSize
unsigned getByValSize() const
Definition: TargetCallingConv.h:169
llvm::MachineIRBuilder::buildTrunc
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
Definition: MachineIRBuilder.cpp:737
llvm::MachineIRBuilder::buildMemCpy
MachineInstrBuilder buildMemCpy(const SrcOp &DstPtr, const SrcOp &SrcPtr, const SrcOp &Size, MachineMemOperand &DstMMO, MachineMemOperand &SrcMMO)
Definition: MachineIRBuilder.h:1840
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:600
CallingConvLower.h
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:630
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:151
llvm::ArrayRef::take_front
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:226
llvm::LLT::getSizeInBytes
TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelTypeImpl.h:163
llvm::CallLowering::determineAndHandleAssignments
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg=Register()) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...
Definition: CallLowering.cpp:523
llvm::ISD::ArgFlagsTy::isSExt
bool isSExt() const
Definition: TargetCallingConv.h:76
llvm::CallLowering::ArgInfo::Regs
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:62
llvm::ISD::ArgFlagsTy::isPointer
bool isPointer() const
Definition: TargetCallingConv.h:141
llvm::CallLowering::BaseArgInfo::Ty
Type * Ty
Definition: CallLowering.h:49
llvm::CallLowering::CallLoweringInfo
Definition: CallLowering.h:101
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:776
llvm::CallLowering::IncomingValueHandler::buildExtensionHint
Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy)
Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on VA, returning the new register ...
Definition: CallLowering.cpp:1142
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1391
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:137
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:474
Instructions.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:1008
llvm::MachineRegisterInfo::getType
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Definition: MachineRegisterInfo.h:732
llvm::MachineRegisterInfo::cloneVirtualRegister
Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
Definition: MachineRegisterInfo.cpp:172
llvm::ISD::ArgFlagsTy::getNonZeroByValAlign
Align getNonZeroByValAlign() const
Definition: TargetCallingConv.h:153
llvm::MachineIRBuilder::buildSExt
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
Definition: MachineIRBuilder.cpp:419
llvm::CCValAssign::getValVT
MVT getValVT() const
Definition: CallingConvLower.h:143
llvm::LLT::getElementCount
ElementCount getElementCount() const
Definition: LowLevelTypeImpl.h:144
llvm::ISD::ArgFlagsTy::setSRet
void setSRet()
Definition: TargetCallingConv.h:83
llvm::Register::isValid
bool isValid() const
Definition: Register.h:126
llvm::TargetLoweringBase::getRegisterTypeForCallingConv
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
Definition: TargetLowering.h:1539
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MVT::getVT
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:526
llvm::isInTailCallPosition
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:525
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:261
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::CallLowering::getReturnInfo
void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl< BaseArgInfo > &Outs, const DataLayout &DL) const
Get the type and the ArgFlags for the split components of RetTy as returned by ComputeValueVTs.
Definition: CallLowering.cpp:906
MachineOperand.h
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1176
llvm::CallLowering::BaseArgInfo::IsFixed
bool IsFixed
Definition: CallLowering.h:51
llvm::ISD::ArgFlagsTy::setPreallocated
void setPreallocated()
Definition: TargetCallingConv.h:95
llvm::CallLowering::ArgInfo::OrigValue
const Value * OrigValue
Optionally track the original IR value for the argument.
Definition: CallLowering.h:72
LLVMContext.h
llvm::CallLowering::ValueAssigner::assignArg
virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, const ArgInfo &Info, ISD::ArgFlagsTy Flags, CCState &State)
Wrap call to (typically tablegenerated CCAssignFn).
Definition: CallLowering.h:181
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::MachineIRBuilder::buildStore
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
Definition: MachineIRBuilder.cpp:387
llvm::LLT::getElementType
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelTypeImpl.h:237
llvm::SI::KernelInputOffsets::Offsets
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1263
llvm::CallLowering::ArgInfo::OrigArgIndex
unsigned OrigArgIndex
Index original Function's argument.
Definition: CallLowering.h:75
llvm::CallLowering::parametersInCSRMatch
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
Definition: CallLowering.cpp:940
copy
we should consider alternate ways to model stack dependencies Lots of things could be done in WebAssemblyTargetTransformInfo cpp there are numerous optimization related hooks that can be overridden in WebAssemblyTargetLowering Instead of the OptimizeReturned which should consider preserving the returned attribute through to MachineInstrs and extending the MemIntrinsicResults pass to do this optimization on calls too That would also let the WebAssemblyPeephole pass clean up dead defs for such as it does for stores Consider implementing and or getMachineCombinerPatterns Find a clean way to fix the problem which leads to the Shrink Wrapping pass being run after the WebAssembly PEI pass When setting multiple variables to the same we currently get code like const It could be done with a smaller encoding like local tee $pop5 local copy
Definition: README.txt:101
llvm::LLT::scalar
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelTypeImpl.h:43
llvm::ISD::ArgFlagsTy::setSwiftAsync
void setSwiftAsync()
Definition: TargetCallingConv.h:101
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::AttributeList::FirstArgIndex
@ FirstArgIndex
Definition: Attributes.h:403
llvm::CallLowering::ValueHandler::isIncomingArgumentHandler
bool isIncomingArgumentHandler() const
Returns true if the handler is dealing with incoming arguments, i.e.
Definition: CallLowering.h:239
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:152
llvm::CCValAssign::needsCustom
bool needsCustom() const
Definition: CallingConvLower.h:148
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1334
llvm::ISD::ArgFlagsTy::setZExt
void setZExt()
Definition: TargetCallingConv.h:74
getReg
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
Definition: MipsDisassembler.cpp:572
llvm::CallLowering::ValueHandler::assignValueToAddress
virtual void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, MachinePointerInfo &MPO, CCValAssign &VA)=0
The specified value has been assigned to a stack location.
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::SmallVectorImpl::insert
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:773
llvm::CallLowering::BaseArgInfo::Flags
SmallVector< ISD::ArgFlagsTy, 4 > Flags
Definition: CallLowering.h:50
llvm::LLT
Definition: LowLevelTypeImpl.h:40
llvm::CallLowering::lowerCall
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:546
llvm::CallLowering::setArgFlags
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
Definition: CallLowering.cpp:154