CallLowering.cpp
1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
15 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
16 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
17 #include "llvm/CodeGen/GlobalISel/Utils.h"
18 #include "llvm/CodeGen/MachineOperand.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/Target/TargetMachine.h"
26 
27 #define DEBUG_TYPE "call-lowering"
28 
29 using namespace llvm;
30 
31 void CallLowering::anchor() {}
32 
33 /// Helper function which updates \p Flags when \p AttrFn returns true.
34 static void
35 addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
36  const std::function<bool(Attribute::AttrKind)> &AttrFn) {
37  if (AttrFn(Attribute::SExt))
38  Flags.setSExt();
39  if (AttrFn(Attribute::ZExt))
40  Flags.setZExt();
41  if (AttrFn(Attribute::InReg))
42  Flags.setInReg();
43  if (AttrFn(Attribute::StructRet))
44  Flags.setSRet();
45  if (AttrFn(Attribute::Nest))
46  Flags.setNest();
47  if (AttrFn(Attribute::ByVal))
48  Flags.setByVal();
49  if (AttrFn(Attribute::Preallocated))
50  Flags.setPreallocated();
51  if (AttrFn(Attribute::InAlloca))
52  Flags.setInAlloca();
53  if (AttrFn(Attribute::Returned))
54  Flags.setReturned();
55  if (AttrFn(Attribute::SwiftSelf))
56  Flags.setSwiftSelf();
57  if (AttrFn(Attribute::SwiftAsync))
58  Flags.setSwiftAsync();
59  if (AttrFn(Attribute::SwiftError))
60  Flags.setSwiftError();
61 }
62 
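/// Build the ISD argument flags for operand \p ArgIdx of \p Call from the
/// call's parameter attributes.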
63 ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
64  unsigned ArgIdx) const {
65  ISD::ArgFlagsTy Flags;
66  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
67  return Call.paramHasAttr(ArgIdx, Attr);
68  });
69  return Flags;
70 }
71 
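/// Adds flags to \p Flags based off of the attributes in \p Attrs.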
72 void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
73  const AttributeList &Attrs,
74  unsigned OpIdx) const {
75  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
76  return Attrs.hasAttribute(OpIdx, Attr);
77  });
78 }
79 
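/// Lower a call described by \p CB: gather the argument and return-value
/// info into a CallLoweringInfo and hand it to the target's lowerCall().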
80 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
81  ArrayRef<Register> ResRegs,
82  ArrayRef<ArrayRef<Register>> ArgRegs,
83  Register SwiftErrorVReg,
84  std::function<unsigned()> GetCalleeReg) const {
85  CallLoweringInfo Info;
86  const DataLayout &DL = MIRBuilder.getDataLayout();
87  MachineFunction &MF = MIRBuilder.getMF();
88  bool CanBeTailCalled = CB.isTailCall() &&
89  isInTailCallPosition(CB, MF.getTarget()) &&
90  (MF.getFunction()
91  .getFnAttribute("disable-tail-calls")
92  .getValueAsString() != "true");
93 
94  CallingConv::ID CallConv = CB.getCallingConv();
95  Type *RetTy = CB.getType();
96  bool IsVarArg = CB.getFunctionType()->isVarArg();
97 
98  SmallVector<BaseArgInfo, 4> SplitArgs;
99  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
100  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
101 
102  if (!Info.CanLowerReturn) {
103  // Callee requires sret demotion.
104  insertSRetOutgoingArgument(MIRBuilder, CB, Info);
105 
106  // The sret demotion isn't compatible with tail-calls, since the sret
107  // argument points into the caller's stack frame.
108  CanBeTailCalled = false;
109  }
110 
111  // First step is to marshall all the function's parameters into the correct
112  // physregs and memory locations. Gather the sequence of argument types that
113  // we'll pass to the assigner function.
114  unsigned i = 0;
115  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
116  for (auto &Arg : CB.args()) {
117  ArgInfo OrigArg{ArgRegs[i], *Arg.get(), getAttributesForArgIdx(CB, i),
118  i < NumFixedArgs};
119  setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
120 
121  // If we have an explicit sret argument that is an Instruction (i.e., it
122  // might point to function-local memory), we can't meaningfully tail-call.
123  if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
124  CanBeTailCalled = false;
125 
126  Info.OrigArgs.push_back(OrigArg);
127  ++i;
128  }
129 
130  // Try looking through a bitcast from one function type to another.
131  // Commonly happens with calls to objc_msgSend().
132  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
133  if (const Function *F = dyn_cast<Function>(CalleeV))
134  Info.Callee = MachineOperand::CreateGA(F, 0);
135  else
136  Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
137 
138  Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}};
139  if (!Info.OrigRet.Ty->isVoidTy())
140  setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
141 
142  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
143  Info.CallConv = CallConv;
144  Info.SwiftErrorVReg = SwiftErrorVReg;
145  Info.IsMustTailCall = CB.isMustTailCall();
146  Info.IsTailCall = CanBeTailCalled;
147  Info.IsVarArg = IsVarArg;
148  return lowerCall(MIRBuilder, Info);
149 }
150 
151 template <typename FuncInfoTy>
152 void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
153  const DataLayout &DL,
154  const FuncInfoTy &FuncInfo) const {
155  auto &Flags = Arg.Flags[0];
156  const AttributeList &Attrs = FuncInfo.getAttributes();
157  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
158 
159  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
160  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
161  assert(OpIdx >= AttributeList::FirstArgIndex);
162  Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
163 
164  auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
165  Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
166 
167  // For ByVal, alignment should be passed from FE. BE will guess if
168  // this info is not there but there are cases it cannot get right.
169  if (auto ParamAlign =
170  FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
171  MemAlign = *ParamAlign;
172  else if ((ParamAlign =
173  FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex)))
174  MemAlign = *ParamAlign;
175  else
176  MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
177  } else if (OpIdx >= AttributeList::FirstArgIndex) {
178  if (auto ParamAlign =
179  FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
180  MemAlign = *ParamAlign;
181  }
182  Flags.setMemAlign(MemAlign);
183  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
184 
185  // Don't try to use the returned attribute if the argument is marked as
186  // swiftself, since it won't be passed in x0.
187  if (Flags.isSwiftSelf())
188  Flags.setReturned(false);
189 }
190 
191 template void
192 CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
193  const DataLayout &DL,
194  const Function &FuncInfo) const;
195 
196 template void
197 CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
198  const DataLayout &DL,
199  const CallBase &FuncInfo) const;
200 
201 void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
202  SmallVectorImpl<ArgInfo> &SplitArgs,
203  const DataLayout &DL,
204  CallingConv::ID CallConv) const {
205  LLVMContext &Ctx = OrigArg.Ty->getContext();
206 
207  SmallVector<EVT, 4> SplitVTs;
208  SmallVector<uint64_t, 4> Offsets;
209  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
210 
211  if (SplitVTs.size() == 0)
212  return;
213 
214  if (SplitVTs.size() == 1) {
215  // No splitting to do, but we want to replace the original type (e.g. [1 x
216  // double] -> double).
217  SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
218  OrigArg.Flags[0], OrigArg.IsFixed,
219  OrigArg.OrigValue);
220  return;
221  }
222 
223  // Create one ArgInfo for each virtual register in the original ArgInfo.
224  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
225 
226  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
227  OrigArg.Ty, CallConv, false);
228  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
229  Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
230  SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
231  OrigArg.IsFixed);
232  if (NeedsRegBlock)
233  SplitArgs.back().Flags[0].setInConsecutiveRegs();
234  }
235 
236  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
237 }
238 
239 void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
240  Type *PackedTy,
241  MachineIRBuilder &MIRBuilder) const {
242  assert(DstRegs.size() > 1 && "Nothing to unpack");
243 
244  const DataLayout &DL = MIRBuilder.getDataLayout();
245 
246  SmallVector<LLT, 8> LLTs;
247  SmallVector<uint64_t, 8> Offsets;
248  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
249  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");
250 
251  for (unsigned i = 0; i < DstRegs.size(); ++i)
252  MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
253 }
254 
255 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
256 static MachineInstrBuilder
257 mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
258  ArrayRef<Register> SrcRegs) {
259  MachineRegisterInfo &MRI = *B.getMRI();
260  LLT LLTy = MRI.getType(DstRegs[0]);
261  LLT PartLLT = MRI.getType(SrcRegs[0]);
262 
263  // Deal with v3s16 split into v2s16
264  LLT LCMTy = getLCMType(LLTy, PartLLT);
265  if (LCMTy == LLTy) {
266  // Common case where no padding is needed.
267  assert(DstRegs.size() == 1);
268  return B.buildConcatVectors(DstRegs[0], SrcRegs);
269  }
270 
271  // We need to create an unmerge to the result registers, which may require
272  // widening the original value.
273  Register UnmergeSrcReg;
274  if (LCMTy != PartLLT) {
275  // e.g. A <3 x s16> value was split to <2 x s16>
276  // %register_value0:_(<2 x s16>)
277  // %register_value1:_(<2 x s16>)
278  // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
279  // %concat:_(<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
280  // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
281  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
282  Register Undef = B.buildUndef(PartLLT).getReg(0);
283 
284  // Build vector of undefs.
285  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);
286 
287  // Replace the first sources with the real registers.
288  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
289  UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
290  } else {
291  // We don't need to widen anything if we're extracting a scalar which was
292  // promoted to a vector e.g. s8 -> v4s8 -> s8
293  assert(SrcRegs.size() == 1);
294  UnmergeSrcReg = SrcRegs[0];
295  }
296 
297  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
298 
299  SmallVector<Register, 8> PadDstRegs(NumDst);
300  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
301 
302  // Create the excess dead defs for the unmerge.
303  for (int I = DstRegs.size(); I != NumDst; ++I)
304  PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
305 
306  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
307 }
308 
309 /// Create a sequence of instructions to combine pieces split into register
310 /// typed values to the original IR value. \p OrigRegs contains the destination
311 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
312 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
313 static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
314  ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
315  const ISD::ArgFlagsTy Flags) {
316  MachineRegisterInfo &MRI = *B.getMRI();
317 
318  if (PartLLT == LLTy) {
319  // We should have avoided introducing a new virtual register, and just
320  // directly assigned here.
321  assert(OrigRegs[0] == Regs[0]);
322  return;
323  }
324 
325  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
326  Regs.size() == 1) {
327  B.buildBitcast(OrigRegs[0], Regs[0]);
328  return;
329  }
330 
331  // A vector PartLLT needs extending to LLTy's element size.
332  // E.g. <2 x s64> = G_SEXT <2 x s32>.
333  if (PartLLT.isVector() == LLTy.isVector() &&
334  PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
335  (!PartLLT.isVector() ||
336  PartLLT.getNumElements() == LLTy.getNumElements()) &&
337  OrigRegs.size() == 1 && Regs.size() == 1) {
338  Register SrcReg = Regs[0];
339 
340  LLT LocTy = MRI.getType(SrcReg);
341 
342  if (Flags.isSExt()) {
343  SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
344  .getReg(0);
345  } else if (Flags.isZExt()) {
346  SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
347  .getReg(0);
348  }
349 
350  B.buildTrunc(OrigRegs[0], SrcReg);
351  return;
352  }
353 
354  if (!LLTy.isVector() && !PartLLT.isVector()) {
355  assert(OrigRegs.size() == 1);
356  LLT OrigTy = MRI.getType(OrigRegs[0]);
357 
358  unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
359  if (SrcSize == OrigTy.getSizeInBits())
360  B.buildMerge(OrigRegs[0], Regs);
361  else {
362  auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
363  B.buildTrunc(OrigRegs[0], Widened);
364  }
365 
366  return;
367  }
368 
369  if (PartLLT.isVector()) {
370  assert(OrigRegs.size() == 1);
371  SmallVector<Register> CastRegs(Regs.begin(), Regs.end());
372 
373  // If PartLLT is a mismatched vector in both number of elements and element
374  // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
375  // have the same elt type, i.e. v4s32.
376  if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
377  PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
378  Regs.size() == 1) {
379  LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
380  .changeNumElements(PartLLT.getNumElements() * 2);
381  CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
382  PartLLT = NewTy;
383  }
384 
385  if (LLTy.getScalarType() == PartLLT.getElementType()) {
386  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
387  } else {
388  unsigned I = 0;
389  LLT GCDTy = getGCDType(LLTy, PartLLT);
390 
391  // We are both splitting a vector, and bitcasting its element types. Cast
392  // the source pieces into the appropriate number of pieces with the result
393  // element type.
394  for (Register SrcReg : CastRegs)
395  CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
396  mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
397  }
398 
399  return;
400  }
401 
402  assert(LLTy.isVector() && !PartLLT.isVector());
403 
404  LLT DstEltTy = LLTy.getElementType();
405 
406  // Pointer information was discarded. We'll need to coerce some register types
407  // to avoid violating type constraints.
408  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
409 
410  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
411 
412  if (DstEltTy == PartLLT) {
413  // Vector was trivially scalarized.
414 
415  if (RealDstEltTy.isPointer()) {
416  for (Register Reg : Regs)
417  MRI.setType(Reg, RealDstEltTy);
418  }
419 
420  B.buildBuildVector(OrigRegs[0], Regs);
421  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
422  // Deal with vector with 64-bit elements decomposed to 32-bit
423  // registers. Need to create intermediate 64-bit elements.
424  SmallVector<Register, 8> EltMerges;
425  int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
426 
427  assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
428 
429  for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
430  auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
431  // Fix the type in case this is really a vector of pointers.
432  MRI.setType(Merge.getReg(0), RealDstEltTy);
433  EltMerges.push_back(Merge.getReg(0));
434  Regs = Regs.drop_front(PartsPerElt);
435  }
436 
437  B.buildBuildVector(OrigRegs[0], EltMerges);
438  } else {
439  // Vector was split, and elements promoted to a wider type.
440  // FIXME: Should handle floating point promotions.
441  LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
442  auto BV = B.buildBuildVector(BVType, Regs);
443  B.buildTrunc(OrigRegs[0], BV);
444  }
445 }
446 
447 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
448 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
449 /// contain the type of scalar value extension if necessary.
450 ///
451 /// This is used for outgoing values (vregs to physregs)
452 static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
453  Register SrcReg, LLT SrcTy, LLT PartTy,
454  unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
455  // We could just insert a regular copy, but this is unreachable at the moment.
456  assert(SrcTy != PartTy && "identical part types shouldn't reach here");
457 
458  const unsigned PartSize = PartTy.getSizeInBits();
459 
460  if (PartTy.isVector() == SrcTy.isVector() &&
461  PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
462  assert(DstRegs.size() == 1);
463  B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
464  return;
465  }
466 
467  if (SrcTy.isVector() && !PartTy.isVector() &&
468  PartSize > SrcTy.getElementType().getSizeInBits()) {
469  // Vector was scalarized, and the elements extended.
470  auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
471  for (int i = 0, e = DstRegs.size(); i != e; ++i)
472  B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
473  return;
474  }
475 
476  LLT GCDTy = getGCDType(SrcTy, PartTy);
477  if (GCDTy == PartTy) {
478  // If this is already evenly divisible, we can create a simple unmerge.
479  B.buildUnmerge(DstRegs, SrcReg);
480  return;
481  }
482 
483  MachineRegisterInfo &MRI = *B.getMRI();
484  LLT DstTy = MRI.getType(DstRegs[0]);
485  LLT LCMTy = getLCMType(SrcTy, PartTy);
486 
487  const unsigned LCMSize = LCMTy.getSizeInBits();
488  const unsigned DstSize = DstTy.getSizeInBits();
489  const unsigned SrcSize = SrcTy.getSizeInBits();
490 
491  Register UnmergeSrc = SrcReg;
492  if (LCMSize != SrcSize) {
493  // Widen to the common type.
494  Register Undef = B.buildUndef(SrcTy).getReg(0);
495  SmallVector<Register, 8> MergeParts(1, SrcReg);
496  for (unsigned Size = SrcSize; Size != LCMSize; Size += SrcSize)
497  MergeParts.push_back(Undef);
498 
499  UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
500  }
501 
502  // Unmerge to the original registers and pad with dead defs.
503  SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
504  for (unsigned Size = DstSize * DstRegs.size(); Size != LCMSize;
505  Size += DstSize) {
506  UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
507  }
508 
509  B.buildUnmerge(UnmergeResults, UnmergeSrc);
510 }
511 
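/// Determine a location for every value in \p Args with \p Assigner, then let
/// \p Handler emit the code that moves each value to its assigned location.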
512 bool CallLowering::determineAndHandleAssignments(
513  ValueHandler &Handler, ValueAssigner &Assigner,
514  SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
515  CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
516  MachineFunction &MF = MIRBuilder.getMF();
517  const Function &F = MF.getFunction();
518  SmallVector<CCValAssign, 16> ArgLocs;
519 
520  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
521  if (!determineAssignments(Assigner, Args, CCInfo))
522  return false;
523 
524  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
525  ThisReturnReg);
526 }
527 
528 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
529  if (Flags.isSExt())
530  return TargetOpcode::G_SEXT;
531  if (Flags.isZExt())
532  return TargetOpcode::G_ZEXT;
533  return TargetOpcode::G_ANYEXT;
534 }
535 
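/// Analyze the argument list in \p Args, using \p Assigner to populate
/// \p CCInfo with a location for each value, splitting values that need more
/// than one register into per-part flags.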
536 bool CallLowering::determineAssignments(ValueAssigner &Assigner,
537  SmallVectorImpl<ArgInfo> &Args,
538  CCState &CCInfo) const {
539  LLVMContext &Ctx = CCInfo.getContext();
540  const CallingConv::ID CallConv = CCInfo.getCallingConv();
541 
542  unsigned NumArgs = Args.size();
543  for (unsigned i = 0; i != NumArgs; ++i) {
544  EVT CurVT = EVT::getEVT(Args[i].Ty);
545 
546  MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
547 
548  // If we need to split the type over multiple regs, check it's a scenario
549  // we currently support.
550  unsigned NumParts =
551  TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
552 
553  if (NumParts == 1) {
554  // Try to use the register type if we couldn't assign the VT.
555  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
556  Args[i].Flags[0], CCInfo))
557  return false;
558  continue;
559  }
560 
561  // For incoming arguments (physregs to vregs), we could have values in
562  // physregs (or memlocs) which we want to extract and copy to vregs.
563  // During this, we might have to deal with the LLT being split across
564  // multiple regs, so we have to record this information for later.
565  //
566  // If we have outgoing args, then we have the opposite case. We have a
567  // vreg with an LLT which we want to assign to a physical location, and
568  // we might have to record that the value has to be split later.
569 
570  // We're handling an incoming arg which is split over multiple regs.
571  // E.g. passing an s128 on AArch64.
572  ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
573  Args[i].Flags.clear();
574 
575  for (unsigned Part = 0; Part < NumParts; ++Part) {
576  ISD::ArgFlagsTy Flags = OrigFlags;
577  if (Part == 0) {
578  Flags.setSplit();
579  } else {
580  Flags.setOrigAlign(Align(1));
581  if (Part == NumParts - 1)
582  Flags.setSplitEnd();
583  }
584 
585  if (!Assigner.isIncomingArgumentHandler()) {
586  // TODO: Also check if there is a valid extension that preserves the
587  // bits. However currently this call lowering doesn't support non-exact
588  // split parts, so that can't be tested.
589  if (OrigFlags.isReturned() &&
590  (NumParts * NewVT.getSizeInBits() != CurVT.getSizeInBits())) {
591  Flags.setReturned(false);
592  }
593  }
594 
595  Args[i].Flags.push_back(Flags);
596  if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
597  Args[i].Flags[Part], CCInfo)) {
598  // Still couldn't assign this smaller part type for some reason.
599  return false;
600  }
601  }
602  }
603 
604  return true;
605 }
606 
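/// Use \p Handler to insert the code that moves the argument/return values in
/// \p Args to or from the locations recorded in \p ArgLocs.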
607 bool CallLowering::handleAssignments(ValueHandler &Handler,
608  SmallVectorImpl<ArgInfo> &Args,
609  CCState &CCInfo,
610  SmallVectorImpl<CCValAssign> &ArgLocs,
611  MachineIRBuilder &MIRBuilder,
612  Register ThisReturnReg) const {
613  MachineFunction &MF = MIRBuilder.getMF();
614  MachineRegisterInfo &MRI = MF.getRegInfo();
615  const Function &F = MF.getFunction();
616  const DataLayout &DL = F.getParent()->getDataLayout();
617 
618  const unsigned NumArgs = Args.size();
619 
620  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
621  assert(j < ArgLocs.size() && "Skipped too many arg locs");
622  CCValAssign &VA = ArgLocs[j];
623  assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
624 
625  if (VA.needsCustom()) {
626  unsigned NumArgRegs =
627  Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
628  if (!NumArgRegs)
629  return false;
630  j += NumArgRegs;
631  continue;
632  }
633 
634  const MVT ValVT = VA.getValVT();
635  const MVT LocVT = VA.getLocVT();
636 
637  const LLT LocTy(LocVT);
638  const LLT ValTy(ValVT);
639  const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
640  const EVT OrigVT = EVT::getEVT(Args[i].Ty);
641  const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
642 
643  // Expected to be multiple regs for a single incoming arg.
644  // There should be Regs.size() ArgLocs per argument.
645  // This should be the same as getNumRegistersForCallingConv
646  const unsigned NumParts = Args[i].Flags.size();
647 
648  // Now split the registers into the assigned types.
649  Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
650 
651  if (NumParts != 1 || NewLLT != OrigTy) {
652  // If we can't directly assign the register, we need one or more
653  // intermediate values.
654  Args[i].Regs.resize(NumParts);
655 
656  // For each split register, create and assign a vreg that will store
657  // the incoming component of the larger value. These will later be
658  // merged to form the final vreg.
659  for (unsigned Part = 0; Part < NumParts; ++Part)
660  Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
661  }
662 
663  assert((j + (NumParts - 1)) < ArgLocs.size() &&
664  "Too many regs for number of args");
665 
666  // Coerce into outgoing value types before register assignment.
667  if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
668  assert(Args[i].OrigRegs.size() == 1);
669  buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
670  ValTy, extendOpFromFlags(Args[i].Flags[0]));
671  }
672 
673  for (unsigned Part = 0; Part < NumParts; ++Part) {
674  Register ArgReg = Args[i].Regs[Part];
675  // There should be Regs.size() ArgLocs per argument.
676  VA = ArgLocs[j + Part];
677  const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
678 
679  if (VA.isMemLoc() && !Flags.isByVal()) {
680  // Individual pieces may have been spilled to the stack and others
681  // passed in registers.
682 
683  // TODO: The memory size may be larger than the value we need to
684  // store. We may need to adjust the offset for big endian targets.
685  uint64_t MemSize = Handler.getStackValueStoreSize(DL, VA);
686 
687  MachinePointerInfo MPO;
688  Register StackAddr =
689  Handler.getStackAddress(MemSize, VA.getLocMemOffset(), MPO, Flags);
690 
691  Handler.assignValueToAddress(Args[i], Part, StackAddr, MemSize, MPO,
692  VA);
693  continue;
694  }
695 
696  if (VA.isMemLoc() && Flags.isByVal()) {
697  assert(Args[i].Regs.size() == 1 &&
698  "didn't expect split byval pointer");
699 
700  if (Handler.isIncomingArgumentHandler()) {
701  // We just need to copy the frame index value to the pointer.
702  MachinePointerInfo MPO;
703  Register StackAddr = Handler.getStackAddress(
704  Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
705  MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
706  } else {
707  // For outgoing byval arguments, insert the implicit copy byval
708  // implies, such that writes in the callee do not modify the caller's
709  // value.
710  uint64_t MemSize = Flags.getByValSize();
711  int64_t Offset = VA.getLocMemOffset();
712 
713  MachinePointerInfo DstMPO;
714  Register StackAddr =
715  Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
716 
717  MachinePointerInfo SrcMPO(Args[i].OrigValue);
718  if (!Args[i].OrigValue) {
719  // We still need to accurately track the stack address space if we
720  // don't know the underlying value.
721  const LLT PtrTy = MRI.getType(StackAddr);
722  SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
723  }
724 
725  Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
726  inferAlignFromPtrInfo(MF, DstMPO));
727 
728  Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
729  inferAlignFromPtrInfo(MF, SrcMPO));
730 
731  Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
732  DstMPO, DstAlign, SrcMPO, SrcAlign,
733  MemSize, VA);
734  }
735  continue;
736  }
737 
738  assert(!VA.needsCustom() && "custom loc should have been handled already");
739 
740  if (i == 0 && ThisReturnReg.isValid() &&
741  Handler.isIncomingArgumentHandler() &&
742  isTypeIsValidForThisReturn(ValVT)) {
743  Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
744  continue;
745  }
746 
747  Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
748  }
749 
750  // Now that all pieces have been assigned, re-pack the register typed values
751  // into the original value typed registers.
752  if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
753  // Merge the split registers into the expected larger result vregs of
754  // the original call.
755  buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
756  LocTy, Args[i].Flags[0]);
757  }
758 
759  j += NumParts - 1;
760  }
761 
762  return true;
763 }
764 
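/// Load the values of a demoted sret return from the stack object \p FI,
/// addressed through \p DemoteReg, into the virtual registers \p VRegs.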
765 void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
766  ArrayRef<Register> VRegs, Register DemoteReg,
767  int FI) const {
768  MachineFunction &MF = MIRBuilder.getMF();
769  MachineRegisterInfo &MRI = MF.getRegInfo();
770  const DataLayout &DL = MF.getDataLayout();
771 
772  SmallVector<EVT, 4> SplitVTs;
773  SmallVector<uint64_t, 4> Offsets;
774  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
775 
776  assert(VRegs.size() == SplitVTs.size());
777 
778  unsigned NumValues = SplitVTs.size();
779  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
780  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
781  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
782 
783  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
784 
785  for (unsigned I = 0; I < NumValues; ++I) {
786  Register Addr;
787  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
788  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
789  MRI.getType(VRegs[I]).getSizeInBytes(),
790  commonAlignment(BaseAlign, Offsets[I]));
791  MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
792  }
793 }
794 
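/// Store the return values in \p VRegs through the incoming sret pointer
/// \p DemoteReg, one store per split value type of \p RetTy.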
795 void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
796  ArrayRef<Register> VRegs,
797  Register DemoteReg) const {
798  MachineFunction &MF = MIRBuilder.getMF();
799  MachineRegisterInfo &MRI = MF.getRegInfo();
800  const DataLayout &DL = MF.getDataLayout();
801 
802  SmallVector<EVT, 4> SplitVTs;
803  SmallVector<uint64_t, 4> Offsets;
804  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
805 
806  assert(VRegs.size() == SplitVTs.size());
807 
808  unsigned NumValues = SplitVTs.size();
809  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
810  unsigned AS = DL.getAllocaAddrSpace();
811  LLT OffsetLLTy =
812  getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
813 
814  MachinePointerInfo PtrInfo(AS);
815 
816  for (unsigned I = 0; I < NumValues; ++I) {
817  Register Addr;
818  MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
819  auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
820  MRI.getType(VRegs[I]).getSizeInBytes(),
821  commonAlignment(BaseAlign, Offsets[I]));
822  MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
823  }
824 }
825 
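/// For a function whose return requires sret demotion, create the hidden
/// incoming sret pointer argument and prepend it to \p SplitArgs.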
826 void CallLowering::insertSRetIncomingArgument(
827  const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
828  MachineRegisterInfo &MRI, const DataLayout &DL) const {
829  unsigned AS = DL.getAllocaAddrSpace();
830  DemoteReg = MRI.createGenericVirtualRegister(
831  LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
832 
833  Type *PtrTy = PointerType::get(F.getReturnType(), AS);
834 
835  SmallVector<EVT, 1> ValueVTs;
836  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
837 
838  // NOTE: Assume that a pointer won't get split into more than one VT.
839  assert(ValueVTs.size() == 1);
840 
841  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()));
842  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
843  DemoteArg.Flags[0].setSRet();
844  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
845 }
846 
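/// For the call described by \p CB, allocate a stack slot for the demoted
/// return value and insert the hidden sret ArgInfo into the OrigArgs field of
/// \p Info.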
847 void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
848  const CallBase &CB,
849  CallLoweringInfo &Info) const {
850  const DataLayout &DL = MIRBuilder.getDataLayout();
851  Type *RetTy = CB.getType();
852  unsigned AS = DL.getAllocaAddrSpace();
853  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
854 
855  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
856  DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
857 
858  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
859  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS));
860  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
861  DemoteArg.Flags[0].setSRet();
862 
863  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
864  Info.DemoteStackIndex = FI;
865  Info.DemoteRegister = DemoteReg;
866 }
867 
868 bool CallLowering::checkReturn(CCState &CCInfo,
869  SmallVectorImpl<BaseArgInfo> &Outs,
870  CCAssignFn *Fn) const {
871  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
872  MVT VT = MVT::getVT(Outs[I].Ty);
873  if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
874  return false;
875  }
876  return true;
877 }
878 
879 void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
880  AttributeList Attrs,
881  SmallVectorImpl<BaseArgInfo> &Outs,
882  const DataLayout &DL) const {
883  LLVMContext &Context = RetTy->getContext();
884  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
885 
886  SmallVector<EVT, 4> SplitVTs;
887  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
888  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
889 
890  for (EVT VT : SplitVTs) {
891  unsigned NumParts =
892  TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
893  MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
894  Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
895 
896  for (unsigned I = 0; I < NumParts; ++I) {
897  Outs.emplace_back(PartTy, Flags);
898  }
899  }
900 }
901 
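/// Toplevel function to check whether the return type of the current function
/// can be lowered for its calling convention.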
902 bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
903  const auto &F = MF.getFunction();
904  Type *ReturnType = F.getReturnType();
905  CallingConv::ID CallConv = F.getCallingConv();
906 
907  SmallVector<BaseArgInfo, 4> SplitArgs;
908  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
909  MF.getDataLayout());
910  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
911 }
912 
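/// Check that every outgoing argument assigned to a callee-saved register is
/// a plain copy of the corresponding incoming value; otherwise the call cannot
/// be tail-called.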
913 bool CallLowering::parametersInCSRMatch(
914  const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
915  const SmallVectorImpl<CCValAssign> &OutLocs,
916  const SmallVectorImpl<ArgInfo> &OutArgs) const {
917  for (unsigned i = 0; i < OutLocs.size(); ++i) {
918  auto &ArgLoc = OutLocs[i];
919  // If it's not a register, it's fine.
920  if (!ArgLoc.isRegLoc())
921  continue;
922 
923  MCRegister PhysReg = ArgLoc.getLocReg();
924 
925  // Only look at callee-saved registers.
926  if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
927  continue;
928 
929  LLVM_DEBUG(
930  dbgs()
931  << "... Call has an argument passed in a callee-saved register.\n");
932 
933  // Check if it was copied from.
934  const ArgInfo &OutInfo = OutArgs[i];
935 
936  if (OutInfo.Regs.size() > 1) {
937  LLVM_DEBUG(
938  dbgs() << "... Cannot handle arguments in multiple registers.\n");
939  return false;
940  }
941 
942  // Check if we copy the register, walking through copies from virtual
943  // registers. Note that getDefIgnoringCopies does not ignore copies from
944  // physical registers.
945  MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
946  if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
947  LLVM_DEBUG(
948  dbgs()
949  << "... Parameter was not copied into a VReg, cannot tail call.\n");
950  return false;
951  }
952 
953  // Got a copy. Verify that it's the same as the register we want.
954  Register CopyRHS = RegDef->getOperand(1).getReg();
955  if (CopyRHS != PhysReg) {
956  LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
957  "VReg, cannot tail call.\n");
958  return false;
959  }
960  }
961 
962  return true;
963 }
964 
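/// Check that \p InArgs would be assigned to identical locations under both
/// the callee and the caller calling conventions.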
965 bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
966  MachineFunction &MF,
967  SmallVectorImpl<ArgInfo> &InArgs,
968  ValueAssigner &CalleeAssigner,
969  ValueAssigner &CallerAssigner) const {
970  const Function &F = MF.getFunction();
971  CallingConv::ID CalleeCC = Info.CallConv;
972  CallingConv::ID CallerCC = F.getCallingConv();
973 
974  if (CallerCC == CalleeCC)
975  return true;
976 
977  SmallVector<CCValAssign, 16> ArgLocs1;
978  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
979  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
980  return false;
981 
982  SmallVector<CCValAssign, 16> ArgLocs2;
983  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
984  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
985  return false;
986 
987  // We need the argument locations to match up exactly. If there's more in
988  // one than the other, then we are done.
989  if (ArgLocs1.size() != ArgLocs2.size())
990  return false;
991 
992  // Make sure that each location is passed in exactly the same way.
993  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
994  const CCValAssign &Loc1 = ArgLocs1[i];
995  const CCValAssign &Loc2 = ArgLocs2[i];
996 
997  // We need both of them to be the same. So if one is a register and one
998  // isn't, we're done.
999  if (Loc1.isRegLoc() != Loc2.isRegLoc())
1000  return false;
1001 
1002  if (Loc1.isRegLoc()) {
1003  // If they don't have the same register location, we're done.
1004  if (Loc1.getLocReg() != Loc2.getLocReg())
1005  return false;
1006 
1007  // They matched, so we can move to the next ArgLoc.
1008  continue;
1009  }
1010 
1011  // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
1012  if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
1013  return false;
1014  }
1015 
1016  return true;
1017 }
1018 
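/// Return the in-memory size used to store a value of \p VA's value type on
/// the stack.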
1019 uint64_t CallLowering::ValueHandler::getStackValueStoreSize(
1020  const DataLayout &DL, const CCValAssign &VA) const {
1021  const EVT ValVT = VA.getValVT();
1022  if (ValVT != MVT::iPTR)
1023  return ValVT.getStoreSize();
1024 
1025  /// FIXME: We need to get the correct pointer address space.
1026  return DL.getPointerSize();
1027 }
1028 
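/// Emit the G_MEMCPY that copies \p MemSize bytes of a byval argument from
/// \p SrcPtr to \p DstPtr.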
1029 void CallLowering::ValueHandler::copyArgumentMemory(
1030  const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1031  const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1032  const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1033  CCValAssign &VA) const {
1034  MachineFunction &MF = MIRBuilder.getMF();
1035  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
1036  SrcPtrInfo,
1037  MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
1038  SrcAlign);
1039 
1040  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
1041  DstPtrInfo,
1042  MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
1043  MemSize, DstAlign);
1044 
1045  const LLT PtrTy = MRI.getType(DstPtr);
1046  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1047 
1048  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1049  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1050 }
1051 
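/// Extend \p ValReg to the location type given in \p VA, capped at extending
/// to at most \p MaxSizeBits bits.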
1052 Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1053  CCValAssign &VA,
1054  unsigned MaxSizeBits) {
1055  LLT LocTy{VA.getLocVT()};
1056  LLT ValTy{VA.getValVT()};
1057 
1058  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1059  return ValReg;
1060 
1061  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1062  if (MaxSizeBits <= ValTy.getSizeInBits())
1063  return ValReg;
1064  LocTy = LLT::scalar(MaxSizeBits);
1065  }
1066 
1067  switch (VA.getLocInfo()) {
1068  default: break;
1069  case CCValAssign::Full:
1070  case CCValAssign::BCvt:
1071  // FIXME: bitconverting between vector types may or may not be a
1072  // nop in big-endian situations.
1073  return ValReg;
1074  case CCValAssign::AExt: {
1075  auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1076  return MIB.getReg(0);
1077  }
1078  case CCValAssign::SExt: {
1079  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1080  MIRBuilder.buildSExt(NewReg, ValReg);
1081  return NewReg;
1082  }
1083  case CCValAssign::ZExt: {
1084  Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1085  MIRBuilder.buildZExt(NewReg, ValReg);
1086  return NewReg;
1087  }
1088  }
1089  llvm_unreachable("unable to extend register");
1090 }
1091 
1092 void CallLowering::ValueAssigner::anchor() {}
1093 
1094 Register CallLowering::ValueHandler::buildExtensionHint(CCValAssign &VA,
1095  Register SrcReg,
1096  LLT NarrowTy) {
1097  switch (VA.getLocInfo()) {
1098  case CCValAssign::LocInfo::ZExt: {
1099  return MIRBuilder
1100  .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1101  NarrowTy.getScalarSizeInBits())
1102  .getReg(0);
1103  }
1104  case CCValAssign::LocInfo::SExt: {
1105  return MIRBuilder
1106  .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1107  NarrowTy.getScalarSizeInBits())
1108  .getReg(0);
1109  break;
1110  }
1111  default:
1112  return SrcReg;
1113  }
1114 }
1115 
1116 /// Check if we can use a basic COPY instruction between the two types.
1117 ///
1118 /// We're currently building on top of the infrastructure using MVT, which loses
1119 /// pointer information in the CCValAssign. We accept copies from physical
1120 /// registers that have been reported as integers if it's to an equivalent sized
1121 /// pointer LLT.
1122 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1123  if (SrcTy == DstTy)
1124  return true;
1125 
1126  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1127  return false;
1128 
1129  SrcTy = SrcTy.getScalarType();
1130  DstTy = DstTy.getScalarType();
1131 
1132  return (SrcTy.isPointer() && DstTy.isScalar()) ||
1133  (DstTy.isScalar() && SrcTy.isPointer());
1134 }
1135 
1136 void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
1137  Register PhysReg,
1138  CCValAssign &VA) {
1139  const MVT LocVT = VA.getLocVT();
1140  const LLT LocTy(LocVT);
1141  const LLT RegTy = MRI.getType(ValVReg);
1142 
1143  if (isCopyCompatibleType(RegTy, LocTy)) {
1144  MIRBuilder.buildCopy(ValVReg, PhysReg);
1145  return;
1146  }
1147 
1148  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1149  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1150  MIRBuilder.buildTrunc(ValVReg, Hint);
1151 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::CCValAssign::getLocVT
MVT getLocVT() const
Definition: CallingConvLower.h:153
i
i
Definition: README.txt:29
llvm::ISD::ArgFlagsTy::isInAlloca
bool isInAlloca() const
Definition: TargetCallingConv.h:91
llvm::CallLowering::ValueAssigner
Argument handling is mostly uniform between the four places that make these decisions: function forma...
Definition: CallLowering.h:148
llvm::CCValAssign::ZExt
@ ZExt
Definition: CallingConvLower.h:38
llvm::ISD::ArgFlagsTy::isReturned
bool isReturned() const
Definition: TargetCallingConv.h:121
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::getDefIgnoringCopies
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
Definition: Utils.cpp:404
Merge
R600 Clause Merge
Definition: R600ClauseMergePass.cpp:69
llvm
Definition: AllocatorList.h:23
Reg
unsigned Reg
Definition: MachineSink.cpp:1566
llvm::CCValAssign::Full
@ Full
Definition: CallingConvLower.h:36
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::MachineOperand::CreateReg
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
Definition: MachineOperand.h:791
llvm::TargetLowering::functionArgumentNeedsConsecutiveRegisters
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Definition: TargetLowering.h:4003
llvm::ISD::ArgFlagsTy::setSwiftSelf
void setSwiftSelf()
Definition: TargetCallingConv.h:98
llvm::LLT::getScalarSizeInBits
unsigned getScalarSizeInBits() const
Definition: LowLevelTypeImpl.h:163
llvm::ISD::ArgFlagsTy::setNest
void setNest()
Definition: TargetCallingConv.h:119
llvm::CallLowering::handleAssignments
bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, Register ThisReturnReg=Register()) const
Use Handler to insert code to handle the argument/return values represented by Args.
Definition: CallLowering.cpp:607
CallLowering.h
llvm::CCState
CCState - This class holds information needed while lowering arguments and return values.
Definition: CallingConvLower.h:191
buildCopyToRegs
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, Register SrcReg, LLT SrcTy, LLT PartTy, unsigned ExtendOp=TargetOpcode::G_ANYEXT)
Create a sequence of instructions to expand the value in SrcReg (of type SrcTy) to the types in DstRe...
Definition: CallLowering.cpp:452
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:52
llvm::Function
Definition: Function.h:61
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:687
llvm::ISD::ArgFlagsTy::setMemAlign
void setMemAlign(Align A)
Definition: TargetCallingConv.h:148
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1167
llvm::MachineFunction::getMachineMemOperand
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Definition: MachineFunction.cpp:430
llvm::CallLowering::ValueHandler::extendRegister
Register extendRegister(Register ValReg, CCValAssign &VA, unsigned MaxSizeBits=0)
Extend a register to the location type given in VA, capped at extending to at most MaxSize bits.
Definition: CallLowering.cpp:1052
llvm::CallLowering::ValueHandler
Definition: CallLowering.h:216
llvm::CallLowering::insertSRetOutgoingArgument
void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, const CallBase &CB, CallLoweringInfo &Info) const
For the call-base described by CB, insert the hidden sret ArgInfo to the OrigArgs field of Info.
Definition: CallLowering.cpp:847
llvm::LLT::getScalarType
LLT getScalarType() const
Definition: LowLevelTypeImpl.h:121
llvm::ISD::ArgFlagsTy::isZExt
bool isZExt() const
Definition: TargetCallingConv.h:73
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:46
llvm::CallLowering::ValueHandler::assignValueToReg
virtual void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA)=0
The specified value has been assigned to a physical register, handle the appropriate COPY (either to ...
Module.h
llvm::AttributeList
Definition: Attributes.h:385
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1471
llvm::MachineMemOperand
A description of a memory reference used in the backend.
Definition: MachineMemOperand.h:127
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1244
llvm::MachineMemOperand::MODereferenceable
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
Definition: MachineMemOperand.h:142
extendOpFromFlags
static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)
Definition: CallLowering.cpp:528
llvm::CallBase::isMustTailCall
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Definition: Instructions.cpp:294
llvm::CCValAssign::BCvt
@ BCvt
Definition: CallingConvLower.h:46
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::CallLowering::ValueHandler::assignCustomValue
virtual unsigned assignCustomValue(const ArgInfo &Arg, ArrayRef< CCValAssign > VAs)
Handle custom values, which may be passed into one or more of VAs.
Definition: CallLowering.h:278
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:138
MachineIRBuilder.h
llvm::CallLowering::ValueHandler::getStackAddress
virtual Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO, ISD::ArgFlagsTy Flags)=0
Materialize a VReg containing the address of the specified stack-based object.
buildCopyFromRegs
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef< Register > OrigRegs, ArrayRef< Register > Regs, LLT LLTy, LLT PartLLT, const ISD::ArgFlagsTy Flags)
Create a sequence of instructions to combine pieces split into register typed values to the original ...
Definition: CallLowering.cpp:313
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:122
llvm::MachineIRBuilder::buildConstant
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Definition: MachineIRBuilder.cpp:255
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::FunctionType::isVarArg
bool isVarArg() const
Definition: DerivedTypes.h:122
llvm::ISD::ArgFlagsTy::isSwiftSelf
bool isSwiftSelf() const
Definition: TargetCallingConv.h:97
MachineRegisterInfo.h
llvm::ComputeValueVTs
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:124
llvm::getLLTForType
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
Definition: LowLevelType.cpp:21
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:206
TargetLowering.h
llvm::getGCDType
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
Definition: Utils.cpp:804
llvm::EVT::getStoreSize
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition: ValueTypes.h:355
llvm::CallLowering::getTLI
const TargetLowering * getTLI() const
Getter for generic TargetLowering class.
Definition: CallLowering.h:323
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:586
llvm::CCValAssign::AExt
@ AExt
Definition: CallingConvLower.h:39
llvm::CallLowering::resultsCompatible
bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const
Definition: CallLowering.cpp:965
llvm::CCValAssign
CCValAssign - Represent assignment of one arg/retval to a location.
Definition: CallingConvLower.h:33
TargetMachine.h
llvm::MachineIRBuilder::buildZExt
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
Definition: MachineIRBuilder.cpp:426
llvm::MachineIRBuilder::buildLoad
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
Definition: MachineIRBuilder.h:840
llvm::ISD::ArgFlagsTy::setReturned
void setReturned(bool V=true)
Definition: TargetCallingConv.h:122
llvm::MachineRegisterInfo::setType
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Definition: MachineRegisterInfo.cpp:182
llvm::CallLowering::checkReturnTypeForCallConv
bool checkReturnTypeForCallConv(MachineFunction &MF) const
Toplevel function to check the return type based on the target calling convention.
Definition: CallLowering.cpp:902
llvm::ISD::ArgFlagsTy::isByVal
bool isByVal() const
Definition: TargetCallingConv.h:85
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::CCValAssign::getLocReg
Register getLocReg() const
Definition: CallingConvLower.h:150
llvm::LLT::changeNumElements
LLT changeNumElements(unsigned NewNumElts) const
Return a vector or scalar with the same element type and the new number of elements.
Definition: LowLevelTypeImpl.h:143
llvm::CallLowering::ValueHandler::assignValueToAddress
virtual void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA)=0
The specified value has been assigned to a stack location.
llvm::LLT::getSizeInBits
unsigned getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelTypeImpl.h:109
llvm::CallLowering::ArgInfo
Definition: CallLowering.h:61
llvm::EVT
Extended Value Type.
Definition: ValueTypes.h:35
llvm::MachineInstr::getOperand
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:488
mergeVectorRegsToResultRegs
static MachineInstrBuilder mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, ArrayRef< Register > SrcRegs)
Pack values SrcRegs to cover the vector type result DstRegs.
Definition: CallLowering.cpp:257
llvm::LLT::getSizeInBytes
unsigned getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
Definition: LowLevelTypeImpl.h:117
Utils.h
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::MachineIRBuilder::getDataLayout
const DataLayout & getDataLayout() const
Definition: MachineIRBuilder.h:280
llvm::CallLowering::determineAssignments
bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const
Analyze the argument list in Args, using Assigner to populate CCInfo.
Definition: CallLowering.cpp:536
llvm::MachineIRBuilder::getMF
MachineFunction & getMF()
Getter for the function we currently build.
Definition: MachineIRBuilder.h:270
llvm::CCState::getContext
LLVMContext & getContext() const
Definition: CallingConvLower.h:257
llvm::LLT::vector
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelTypeImpl.h:58
Info
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
llvm::ISD::ArgFlagsTy::setInReg
void setInReg()
Definition: TargetCallingConv.h:80
llvm::CCValAssign::getLocInfo
LocInfo getLocInfo() const
Definition: CallingConvLower.h:155
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::CCValAssign::getLocMemOffset
unsigned getLocMemOffset() const
Definition: CallingConvLower.h:151
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1452
llvm::Attribute::getValueAsString
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:304
llvm::EVT::getTypeForEVT
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:180
llvm::CCValAssign::isRegLoc
bool isRegLoc() const
Definition: CallingConvLower.h:145
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::MachineInstrBuilder::getReg
Register getReg(unsigned Idx) const
Get the register for the operand index.
Definition: MachineInstrBuilder.h:94
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::ISD::ArgFlagsTy::setSExt
void setSExt()
Definition: TargetCallingConv.h:77
llvm::CallLowering::ValueAssigner::isIncomingArgumentHandler
bool isIncomingArgumentHandler() const
Returns true if the handler is dealing with incoming arguments, i.e.
Definition: CallLowering.h:164
llvm::Function::getAttributes
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:249
llvm::CCAssignFn
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
Definition: CallingConvLower.h:177
llvm::LLT::pointer
static LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelTypeImpl.h:50
llvm::LLT::getAddressSpace
unsigned getAddressSpace() const
Definition: LowLevelTypeImpl.h:178
llvm::getLCMType
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
Definition: Utils.cpp:759
llvm::CallLowering::isTypeIsValidForThisReturn
virtual bool isTypeIsValidForThisReturn(EVT Ty) const
For targets which support the "returned" parameter attribute, returns true if the given type is a val...
Definition: CallLowering.h:573
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:388
llvm::ISD::ArgFlagsTy::setSplit
void setSplit()
Definition: TargetCallingConv.h:133
llvm::MachineOperand::clobbersPhysReg
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
Definition: MachineOperand.h:617
llvm::TargetLoweringBase::getNumRegistersForCallingConv
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
Definition: TargetLowering.h:1517
llvm::CallBase::isTailCall
bool isTailCall() const
Tests if this call site is marked as a tail call.
Definition: Instructions.cpp:301
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
llvm::MachineIRBuilder
Helper class to build MachineInstr.
Definition: MachineIRBuilder.h:220
llvm::EVT::getSizeInBits
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:333
llvm::CCValAssign::SExt
@ SExt
Definition: CallingConvLower.h:37
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:64
llvm::MachineInstrBuilder
Definition: MachineInstrBuilder.h:69
llvm::MachineIRBuilder::buildAssertSExt
MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_SEXT Op, Size.
Definition: MachineIRBuilder.cpp:243
llvm::CallLowering::checkReturn
bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const
Definition: CallLowering.cpp:868
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:80
llvm::MachineIRBuilder::buildAssertZExt
MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)
Build and insert Res = G_ASSERT_ZEXT Op, Size.
Definition: MachineIRBuilder.cpp:249
llvm::MachinePointerInfo
This class contains a discriminated union of information about pointers in memory operands,...
Definition: MachineMemOperand.h:37
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::inferAlignFromPtrInfo
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
Definition: Utils.cpp:608
llvm::EVT::getEVT
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:532
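For example (a small sketch; Ctx stands in for a real LLVMContext), the EVT/MVT helpers map an IR type onto the value types the calling-convention code reasons about:
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>
using namespace llvm;

static void mapIRTypeToVTs(LLVMContext &Ctx) {
  Type *I32Ty = Type::getInt32Ty(Ctx);
  EVT VT = EVT::getEVT(I32Ty);   // extended value type for i32
  MVT RegVT = MVT::getVT(I32Ty); // i32 is also a simple machine value type
  assert(VT.getSizeInBits() == RegVT.getSizeInBits());
}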
I
#define I(x, y, z)
Definition: MD5.cpp:59
Analysis.h
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:71
llvm::LLT::isVector
bool isVector() const
Definition: LowLevelTypeImpl.h:96
llvm::LLT::getNumElements
uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelTypeImpl.h:100
llvm::ISD::ArgFlagsTy::setOrigAlign
void setOrigAlign(Align A)
Definition: TargetCallingConv.h:164
llvm::CCState::getCallingConv
CallingConv::ID getCallingConv() const
Definition: CallingConvLower.h:259
assert
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef())
llvm::LLT::isPointer
bool isPointer() const
Definition: LowLevelTypeImpl.h:94
llvm::RegState::Undef
@ Undef
Value of the register doesn't matter.
Definition: MachineInstrBuilder.h:52
llvm::MVT::getSizeInBits
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
Definition: MachineValueType.h:824
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:592
llvm::ISD::ArgFlagsTy::setSwiftError
void setSwiftError()
Definition: TargetCallingConv.h:104
isCopyCompatibleType
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy)
Check if we can use a basic COPY instruction between the two types.
Definition: CallLowering.cpp:1122
llvm::Function::hasAttribute
bool hasAttribute(unsigned i, Attribute::AttrKind Kind) const
Check if an attribute is in the list of attributes.
Definition: Function.h:444
function
Print MemDeps of function
Definition: MemDepPrinter.cpp:83
llvm::MachineRegisterInfo::createGenericVirtualRegister
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Definition: MachineRegisterInfo.cpp:188
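A minimal sketch (names invented) of the common pattern of creating a typed generic vreg and copying an existing value into it:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

static Register copyIntoFreshVReg(MachineIRBuilder &MIRBuilder,
                                  MachineRegisterInfo &MRI, Register Src) {
  // New generic vreg with the same low-level type as the source value.
  Register Dst = MRI.createGenericVirtualRegister(MRI.getType(Src));
  MIRBuilder.buildCopy(Dst, Src);
  return Dst;
}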
llvm::MVT
Machine Value Type.
Definition: MachineValueType.h:30
llvm::MachineOperand::getReg
Register getReg() const
getReg - Returns the register number.
Definition: MachineOperand.h:360
llvm::ISD::ArgFlagsTy::setByVal
void setByVal()
Definition: TargetCallingConv.h:86
llvm::LLT::isScalar
bool isScalar() const
Definition: LowLevelTypeImpl.h:92
llvm::ARM::WinEH::ReturnType
ReturnType
Definition: ARMWinEH.h:25
llvm::CallLowering::addArgFlagsFromAttributes
void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, const AttributeList &Attrs, unsigned OpIdx) const
Adds flags to Flags based on the attributes in Attrs.
Definition: CallLowering.cpp:72
llvm::MachineFunction
Definition: MachineFunction.h:230
addFlagsUsingAttrFn
static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, const std::function< bool(Attribute::AttrKind)> &AttrFn)
Helper function which updates Flags when AttrFn returns true.
Definition: CallLowering.cpp:35
llvm::ISD::ArgFlagsTy::setByValSize
void setByValSize(unsigned S)
Definition: TargetCallingConv.h:173
llvm::CCValAssign::getValNo
unsigned getValNo() const
Definition: CallingConvLower.h:142
llvm::CallLowering::ValueHandler::copyArgumentMemory
void copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr, const MachinePointerInfo &DstPtrInfo, Align DstAlign, const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, CCValAssign &VA) const
Do a memory copy of MemSize bytes from SrcPtr to DstPtr.
Definition: CallLowering.cpp:1029
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::MachineOperand::CreateGA
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)
Definition: MachineOperand.h:850
DataLayout.h
llvm::MachineFrameInfo::CreateStackObject
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Definition: MachineFrameInfo.cpp:51
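A hedged sketch of the surrounding pattern (names invented; address space 0 assumed): allocate a stack slot and materialize its address as a G_FRAME_INDEX:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

static Register makeStackSlotAddr(MachineIRBuilder &MIRBuilder, uint64_t Size,
                                  Align Alignment) {
  MachineFunction &MF = MIRBuilder.getMF();
  int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment,
                                               /*isSpillSlot=*/false);
  // Pointer width taken from the data layout of the module.
  const DataLayout &DL = MF.getDataLayout();
  LLT PtrTy = LLT::pointer(0, DL.getPointerSizeInBits(0));
  return MIRBuilder.buildFrameIndex(PtrTy, FI).getReg(0);
}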
llvm::ISD::ArgFlagsTy::setInAlloca
void setInAlloca()
Definition: TargetCallingConv.h:92
llvm::MachineInstr::getOpcode
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:478
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::CallLowering::insertSRetLoads
void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const
Load the returned value from the stack into virtual registers in VRegs.
Definition: CallLowering.cpp:765
llvm::MachineIRBuilder::buildCopy
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
Definition: MachineIRBuilder.cpp:238
llvm::ISD::ArgFlagsTy::setSplitEnd
void setSplitEnd()
Definition: TargetCallingConv.h:136
uint32_t
llvm::ISD::ArgFlagsTy::isPreallocated
bool isPreallocated() const
Definition: TargetCallingConv.h:94
llvm::ISD::ArgFlagsTy
Definition: TargetCallingConv.h:27
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::MachineIRBuilder::buildExtract
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ...
Definition: MachineIRBuilder.cpp:518
llvm::MVT::iPTR
@ iPTR
Definition: MachineValueType.h:298
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
llvm::LLT::changeElementType
LLT changeElementType(LLT NewEltTy) const
If this type is a vector, return a vector with the same number of elements but the new element type.
Definition: LowLevelTypeImpl.h:127
llvm::MachineMemOperand::MOLoad
@ MOLoad
The memory access reads data.
Definition: MachineMemOperand.h:134
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::MachineIRBuilder::buildAnyExt
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
Definition: MachineIRBuilder.cpp:416
llvm::MachineIRBuilder::materializePtrAdd
Optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
Definition: MachineIRBuilder.cpp:193
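A small usage sketch (the s64 offset type is an assumption suitable for a 64-bit target; names invented):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

static Register addrAtOffset(MachineIRBuilder &MIRBuilder, Register BasePtr,
                             uint64_t ByteOffset) {
  Register Addr;
  // Emits G_CONSTANT + G_PTR_ADD, except that a zero offset simply forwards
  // BasePtr into Addr without inserting any instruction.
  MIRBuilder.materializePtrAdd(Addr, BasePtr, LLT::scalar(64), ByteOffset);
  return Addr;
}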
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:662
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
NumFixedArgs
static unsigned NumFixedArgs
Definition: LanaiISelLowering.cpp:368
llvm::MachineIRBuilder::buildFrameIndex
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
Definition: MachineIRBuilder.cpp:137
llvm::CallLowering::insertSRetIncomingArgument
void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const
Insert the hidden sret ArgInfo to the beginning of SplitArgs.
Definition: CallLowering.cpp:826
llvm::CallLowering::unpackRegs
void unpackRegs(ArrayRef< Register > DstRegs, Register SrcReg, Type *PackedTy, MachineIRBuilder &MIRBuilder) const
Generate instructions for unpacking SrcReg into the DstRegs corresponding to the aggregate type Packe...
Definition: CallLowering.cpp:239
llvm::CallLowering::getAttributesForArgIdx
ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call, unsigned ArgIdx) const
Definition: CallLowering.cpp:63
llvm::computeValueLLTs
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
Definition: Analysis.cpp:132
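For instance (a sketch; the { i32, i64 } struct is just an example), splitting an IR aggregate into its leaf LLTs and offsets looks like:
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

static void splitAggregate(const DataLayout &DL, LLVMContext &Ctx) {
  // { i32, i64 } splits into two scalar LLTs plus their offsets within the
  // aggregate, according to the target's struct layout.
  Type *STy = StructType::get(Type::getInt32Ty(Ctx), Type::getInt64Ty(Ctx));
  SmallVector<LLT, 4> ValueTys;
  SmallVector<uint64_t, 4> Offsets;
  computeValueLLTs(DL, *STy, ValueTys, &Offsets);
}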
j
return j(j<< 16)
llvm::CallLowering::canLowerReturn
virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const
This hook must be implemented to check whether the return values described by Outs can fit into the r...
Definition: CallLowering.h:477
llvm::CCValAssign::isMemLoc
bool isMemLoc() const
Definition: CallingConvLower.h:146
llvm::CallLowering::insertSRetStores
void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const
Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.
Definition: CallLowering.cpp:795
llvm::commonAlignment
Align commonAlignment(Align A, Align B)
Returns the alignment that satisfies both alignments.
Definition: Alignment.h:211
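A trivial concrete example: the alignment that satisfies both of two alignments is the smaller one.
#include "llvm/Support/Alignment.h"
#include <cassert>
using namespace llvm;

static void commonAlignmentExample() {
  // Anything aligned to 8 and anything aligned to 4 are both guaranteed to be
  // aligned to 4, so 4 is the common alignment of the pair.
  assert(commonAlignment(Align(8), Align(4)) == Align(4));
}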
llvm::ISD::ArgFlagsTy::getByValSize
unsigned getByValSize() const
Definition: TargetCallingConv.h:169
llvm::MachineIRBuilder::buildTrunc
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
Definition: MachineIRBuilder.cpp:738
llvm::MachineIRBuilder::buildMemCpy
MachineInstrBuilder buildMemCpy(const SrcOp &DstPtr, const SrcOp &SrcPtr, const SrcOp &Size, MachineMemOperand &DstMMO, MachineMemOperand &SrcMMO)
Definition: MachineIRBuilder.h:1840
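A hedged sketch of emitting a fixed-size argument copy with this helper (all names and the 64-bit size type are assumptions; the in-tree version of this pattern is ValueHandler::copyArgumentMemory):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

static void copyArgBytes(MachineIRBuilder &MIRBuilder, Register DstPtr,
                         Register SrcPtr, const MachinePointerInfo &DstPtrInfo,
                         Align DstAlign, const MachinePointerInfo &SrcPtrInfo,
                         Align SrcAlign, uint64_t MemSize) {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
      DstPtrInfo, MachineMemOperand::MOStore, MemSize, DstAlign);
  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
      SrcPtrInfo, MachineMemOperand::MOLoad, MemSize, SrcAlign);
  // The length operand is an SSA value, so materialize it as a constant.
  auto SizeConst = MIRBuilder.buildConstant(LLT::scalar(64), MemSize);
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
}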
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:542
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:572
llvm::CallLowering::ValueHandler::getStackValueStoreSize
virtual uint64_t getStackValueStoreSize(const DataLayout &DL, const CCValAssign &VA) const
Return the in-memory size to write for the argument at VA.
Definition: CallLowering.cpp:1019
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:153
llvm::ArrayRef::take_front
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:228
llvm::Function::getFnAttribute
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition: Function.h:365
llvm::CallLowering::IncomingValueHandler::assignValueToReg
void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override
Provides a default implementation for argument handling.
Definition: CallLowering.cpp:1136
llvm::CallLowering::determineAndHandleAssignments
bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg=Register()) const
Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...
Definition: CallLowering.cpp:512
llvm::Attribute::getValueAsType
Type * getValueAsType() const
Return the attribute's value as a Type.
Definition: Attributes.cpp:311
llvm::ISD::ArgFlagsTy::isSExt
bool isSExt() const
Definition: TargetCallingConv.h:76
llvm::CallLowering::ArgInfo::Regs
SmallVector< Register, 4 > Regs
Definition: CallLowering.h:62
llvm::CallLowering::BaseArgInfo::Ty
Type * Ty
Definition: CallLowering.h:49
llvm::CallLowering::CallLoweringInfo
Definition: CallLowering.h:95
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:728
llvm::CallLowering::IncomingValueHandler::buildExtensionHint
Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy)
Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on VA, returning the new register ...
Definition: CallLowering.cpp:1094
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1389
llvm::MachineMemOperand::MOStore
@ MOStore
The memory access writes data.
Definition: MachineMemOperand.h:136
llvm::makeArrayRef
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:476
llvm::CallLowering::splitToValueTypes
void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv) const
Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.
Definition: CallLowering.cpp:201
Instructions.h
llvm::MachinePointerInfo::getFixedStack
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Definition: MachineOperand.cpp:995
llvm::MachineRegisterInfo::getType
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Definition: MachineRegisterInfo.h:732
llvm::MachineRegisterInfo::cloneVirtualRegister
Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
Definition: MachineRegisterInfo.cpp:172
llvm::ISD::ArgFlagsTy::getNonZeroByValAlign
Align getNonZeroByValAlign() const
Definition: TargetCallingConv.h:153
llvm::MachineIRBuilder::buildSExt
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
Definition: MachineIRBuilder.cpp:421
llvm::CCValAssign::getValVT
MVT getValVT() const
Definition: CallingConvLower.h:143
llvm::ISD::ArgFlagsTy::setSRet
void setSRet()
Definition: TargetCallingConv.h:83
llvm::Register::isValid
bool isValid() const
Definition: Register.h:126
llvm::TargetLoweringBase::getRegisterTypeForCallingConv
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
Definition: TargetLowering.h:1509
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::MVT::getVT
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:500
llvm::isInTailCallPosition
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition: Analysis.cpp:501
llvm::MachineFunction::getDataLayout
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Definition: MachineFunction.cpp:260
llvm::Function::getAttribute
Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const
Gets the attribute from the list of attributes.
Definition: Function.h:459
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::CallLowering::getReturnInfo
void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl< BaseArgInfo > &Outs, const DataLayout &DL) const
Get the type and the ArgFlags for the split components of RetTy as returned by ComputeValueVTs.
Definition: CallLowering.cpp:879
MachineOperand.h
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...
Definition: InstrTypes.h:1164
llvm::CallLowering::BaseArgInfo::IsFixed
bool IsFixed
Definition: CallLowering.h:51
llvm::ISD::ArgFlagsTy::setPreallocated
void setPreallocated()
Definition: TargetCallingConv.h:95
llvm::CallLowering::ArgInfo::OrigValue
const Value * OrigValue
Optionally track the original IR value for the argument.
Definition: CallLowering.h:72
LLVMContext.h
llvm::CallLowering::ValueAssigner::assignArg
virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, const ArgInfo &Info, ISD::ArgFlagsTy Flags, CCState &State)
Wrap a call to a (typically tablegenerated) CCAssignFn.
Definition: CallLowering.h:172
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
llvm::MachineIRBuilder::buildStore
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
Definition: MachineIRBuilder.cpp:388
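A brief sketch (names assumed) of storing a value through a frame-index address, the pattern used when return values are demoted to the stack:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

static void storeToFrameSlot(MachineIRBuilder &MIRBuilder, Register Val,
                             Register Addr, int FI, uint64_t StoreSize,
                             Align A) {
  MachineFunction &MF = MIRBuilder.getMF();
  // Describe the destination as the fixed stack slot behind frame index FI.
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, StoreSize, A);
  MIRBuilder.buildStore(Val, Addr, *MMO);
}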
llvm::LLT::getElementType
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelTypeImpl.h:188
llvm::SI::KernelInputOffsets::Offsets
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1246
llvm::CallLowering::parametersInCSRMatch
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
Definition: CallLowering.cpp:913
copy
Definition: README.txt:101
llvm::LLT::scalar
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelTypeImpl.h:43
llvm::ISD::ArgFlagsTy::setSwiftAsync
void setSwiftAsync()
Definition: TargetCallingConv.h:101
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::AttributeList::FirstArgIndex
@ FirstArgIndex
Definition: Attributes.h:390
llvm::CallLowering::ValueHandler::isIncomingArgumentHandler
bool isIncomingArgumentHandler() const
Returns true if the handler is dealing with incoming arguments, i.e.
Definition: CallLowering.h:230
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:154
llvm::CCValAssign::needsCustom
bool needsCustom() const
Definition: CallingConvLower.h:148
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1322
llvm::ISD::ArgFlagsTy::setZExt
void setZExt()
Definition: TargetCallingConv.h:74
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:907
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:23
llvm::SmallVectorImpl::insert
iterator insert(iterator I, T &&Elt)
Definition: SmallVector.h:772
llvm::CallLowering::BaseArgInfo::Flags
SmallVector< ISD::ArgFlagsTy, 4 > Flags
Definition: CallLowering.h:50
llvm::LLT
Definition: LowLevelTypeImpl.h:40
llvm::CallLowering::lowerCall
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const
This hook must be implemented to lower the given call instruction, including argument and return valu...
Definition: CallLowering.h:537
llvm::CallLowering::setArgFlags
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const
Definition: CallLowering.cpp:152