clang 3.9.0
CGCall.cpp
//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}
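// Illustrative note (not from the original source): for example, a
// declaration such as
//
//   void f(int) __attribute__((fastcall));
//
// carries CC_X86FastCall in its FunctionType::ExtInfo, which the mapping
// above turns into llvm::CallingConv::X86_FastCall, printed as
// "x86_fastcallcc" in the emitted IR. Conventions LLVM has no equivalent
// for (e.g. __pascal) deliberately fall back to llvm::CallingConv::C.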

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT,
                                 const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}
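// Illustrative note (not from the original source): given a declaration like
//
//   void copy(void *dst __attribute__((pass_object_size(0))), void *src);
//
// the parameter list arranged here becomes (void *, size_t, void *): each
// pass_object_size parameter is followed by an implicit size_t carrying the
// object size computed at the call site.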

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
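// Illustrative note (not from the original source): the ms_abi/sysv_abi
// checks above make each attribute a no-op on its native platform. E.g.
//
//   int f(void) __attribute__((ms_abi));
//
// lowers to CC_X86_64Win64 when targeting a non-Windows x86-64 triple, but
// to plain CC_C when the target is already Windows.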

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}
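// Illustrative note (not from the original source): in a hierarchy such as
//
//   struct V { V(int); };
//   struct A : virtual V { using V::V; };
//
// the base-object variant of A's inherited constructor does not construct V,
// so (on ABIs with constructor variants) it does not need V's parameters;
// only the complete-object variant receives the forwarded 'int'.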

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}
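// Illustrative note (not from the original source): for a variadic prototype
// like printf, a call printf("%d:%d", a, b) is arranged with three argument
// types but RequiredArgs(1), since only the format string is part of the
// prototype; ABI code uses this to tell fixed arguments from variadic ones.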

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
                     argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
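// Illustrative note (not from the original source): for
//
//   struct S { int a[2]; _Complex float c; };
//
// getExpansionSize(S) is 2 (array elements) + 2 (real and imaginary parts)
// = 4, matching the four scalar IR arguments the Expand path produces.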

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
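// Illustrative note (not from the original source): given an IR struct
// { { i32, i32 }, float } and DstSize == 8, the function dives into the
// first element (store size 8 >= DstSize) and returns a pointer to the
// inner { i32, i32 }; with DstSize == 12 it stops at the outer struct,
// since the first element is too small to cover the access.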

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
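// Illustrative note (not from the original source): coercing an i64 with
// value 0xAABBCCDD11223344 to i32 yields 0xAABBCCDD on a big-endian target
// (an lshr by 32 then trunc preserves the high bits, as a memory store/load
// would) but 0x11223344 on a little-endian one (a plain trunc keeps the low
// bits).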


/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}
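// Illustrative note (not from the original source): loading a coerced
// { i64, i64 } from an alloca of type { i8*, i32, i32 } (alloc size 16 on a
// 64-bit target) takes the SrcSize >= DstSize path: the source pointer is
// bitcast and the value loaded directly. Only when the source is strictly
// smaller does the load go through a fresh temporary plus memcpy.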

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
      CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace
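// Illustrative note (not from the original source): for a function returning
// a large struct indirectly and taking (_Complex float c, int i) with the
// complex argument expanded, the mapping would be: IR arg 0 = sret pointer,
// Clang arg 0 -> IR args 1..2 (real, imag), Clang arg 1 -> IR arg 3.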

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
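// Illustrative note (not from the original source): on x86-64 (SysV), a C
// declaration such as
//
//   struct Big { long x[8]; };
//   struct Big make(int n);
//
// is arranged with an indirect (sret) return, so the type built above is
//
//   void (%struct.Big*, i32)
//
// with the hidden sret pointer occupying IR argument 0.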
1592 
1594  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1595  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1596 
1597  if (!isFuncTypeConvertible(FPT))
1598  return llvm::StructType::get(getLLVMContext());
1599 
1600  const CGFunctionInfo *Info;
1601  if (isa<CXXDestructorDecl>(MD))
1602  Info =
1604  else
1605  Info = &arrangeCXXMethodDeclaration(MD);
1606  return GetFunctionType(*Info);
1607 }
1608 
1610  llvm::AttrBuilder &FuncAttrs,
1611  const FunctionProtoType *FPT) {
1612  if (!FPT)
1613  return;
1614 
1616  FPT->isNothrow(Ctx))
1617  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1618 }
1619 
1621  StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1622  AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1623  llvm::AttrBuilder FuncAttrs;
1624  llvm::AttrBuilder RetAttrs;
1625  bool HasOptnone = false;
1626 
1627  CallingConv = FI.getEffectiveCallingConvention();
1628 
1629  if (FI.isNoReturn())
1630  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1631 
1632  // If we have information about the function prototype, we can learn
1633  // attributes form there.
1635  CalleeInfo.getCalleeFunctionProtoType());
1636 
1637  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1638 
1639  bool HasAnyX86InterruptAttr = false;
1640  // FIXME: handle sseregparm someday...
1641  if (TargetDecl) {
1642  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1643  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1644  if (TargetDecl->hasAttr<NoThrowAttr>())
1645  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1646  if (TargetDecl->hasAttr<NoReturnAttr>())
1647  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1648  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1649  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1650 
1651  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1653  getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1654  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1655  // These attributes are not inherited by overloads.
1656  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1657  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1658  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1659  }
1660 
1661  // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1662  if (TargetDecl->hasAttr<ConstAttr>()) {
1663  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1664  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1665  } else if (TargetDecl->hasAttr<PureAttr>()) {
1666  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1667  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1668  } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1669  FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1670  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1671  }
1672  if (TargetDecl->hasAttr<RestrictAttr>())
1673  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1674  if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1675  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1676 
1677  HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
1678  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1679  }
1680 
1681  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1682  if (!HasOptnone) {
1683  if (CodeGenOpts.OptimizeSize)
1684  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1685  if (CodeGenOpts.OptimizeSize == 2)
1686  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1687  }
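// Illustrative sketch of the size-optimization mapping above: with -Os a
// function gets the IR attribute 'optsize'; with -Oz (OptimizeSize == 2) it
// additionally gets 'minsize', so one would expect roughly:
//
//   define void @f() #0 { ... }
//   attributes #0 = { minsize optsize ... }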
1688 
1689  if (CodeGenOpts.DisableRedZone)
1690  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1691  if (CodeGenOpts.NoImplicitFloat)
1692  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1693  if (CodeGenOpts.EnableSegmentedStacks &&
1694  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1695  FuncAttrs.addAttribute("split-stack");
1696 
1697  if (AttrOnCallSite) {
1698  // Attributes that should go on the call site only.
1699  if (!CodeGenOpts.SimplifyLibCalls ||
1700  CodeGenOpts.isNoBuiltinFunc(Name.data()))
1701  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1702  if (!CodeGenOpts.TrapFuncName.empty())
1703  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1704  } else {
1705  // Attributes that should go on the function, but not the call site.
1706  if (!CodeGenOpts.DisableFPElim) {
1707  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1708  } else if (CodeGenOpts.OmitLeafFramePointer) {
1709  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1710  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1711  } else {
1712  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1713  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1714  }
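// For illustration, the frame-pointer logic above means a function built with
// -fno-omit-frame-pointer (DisableFPElim set, leaf functions not exempted)
// would carry roughly:
//
//   attributes #0 = { "no-frame-pointer-elim"="true"
//                     "no-frame-pointer-elim-non-leaf" ... }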
1715 
1716  bool DisableTailCalls =
1717  CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||
1718  (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
1719  FuncAttrs.addAttribute(
1720  "disable-tail-calls",
1721  llvm::toStringRef(DisableTailCalls));
1722 
1723  FuncAttrs.addAttribute("less-precise-fpmad",
1724  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1725  FuncAttrs.addAttribute("no-infs-fp-math",
1726  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1727  FuncAttrs.addAttribute("no-nans-fp-math",
1728  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1729  FuncAttrs.addAttribute("unsafe-fp-math",
1730  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1731  FuncAttrs.addAttribute("use-soft-float",
1732  llvm::toStringRef(CodeGenOpts.SoftFloat));
1733  FuncAttrs.addAttribute("stack-protector-buffer-size",
1734  llvm::utostr(CodeGenOpts.SSPBufferSize));
1735  FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1736  llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1737 
1738  if (CodeGenOpts.StackRealignment)
1739  FuncAttrs.addAttribute("stackrealign");
1740  if (CodeGenOpts.Backchain)
1741  FuncAttrs.addAttribute("backchain");
1742 
1743  // Add target-cpu and target-features attributes to functions. If
1744  // we have a decl for the function and it has a target attribute then
1745  // parse that and add it to the feature set.
1746  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1747  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1748  if (FD && FD->hasAttr<TargetAttr>()) {
1749  llvm::StringMap<bool> FeatureMap;
1750  getFunctionFeatureMap(FeatureMap, FD);
1751 
1752  // Produce the canonical string for this set of features.
1753  std::vector<std::string> Features;
1754  for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1755  ie = FeatureMap.end();
1756  it != ie; ++it)
1757  Features.push_back((it->second ? "+" : "-") + it->first().str());
1758 
1759  // Now add the target-cpu and target-features to the function.
1760  // While we populated the feature map above, we still need to
1761  // get and parse the target attribute so we can get the cpu for
1762  // the function.
1763  const auto *TD = FD->getAttr<TargetAttr>();
1764  TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1765  if (ParsedAttr.second != "")
1766  TargetCPU = ParsedAttr.second;
1767  if (TargetCPU != "")
1768  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1769  if (!Features.empty()) {
1770  std::sort(Features.begin(), Features.end());
1771  FuncAttrs.addAttribute(
1772  "target-features",
1773  llvm::join(Features.begin(), Features.end(), ","));
1774  }
1775  } else {
1776  // Otherwise just add the existing target cpu and target features to the
1777  // function.
1778  std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1779  if (TargetCPU != "")
1780  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1781  if (!Features.empty()) {
1782  std::sort(Features.begin(), Features.end());
1783  FuncAttrs.addAttribute(
1784  "target-features",
1785  llvm::join(Features.begin(), Features.end(), ","));
1786  }
1787  }
1788  }
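// Illustrative example of the target-attribute handling above: given
//
//   __attribute__((target("avx2"))) void f(void) {}
//
// the per-function feature map is serialized into a sorted, comma-joined
// string, so 'f' would carry attributes roughly like
//
//   "target-cpu"="x86-64" "target-features"="+avx,+avx2,+sse4.2,..."
//
// where the exact feature set depends on the base CPU and implied features.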
1789 
1790  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1791  // Conservatively, mark all functions and calls in CUDA as convergent
1792  // (meaning, they may call an intrinsically convergent op, such as
1793  // __syncthreads(), and so can't have certain optimizations applied around
1794  // them). LLVM will remove this attribute where it safely can.
1795  FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1796 
1797  // Respect -fcuda-flush-denormals-to-zero.
1798  if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1799  FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1800  }
1801 
1802  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1803 
1804  QualType RetTy = FI.getReturnType();
1805  const ABIArgInfo &RetAI = FI.getReturnInfo();
1806  switch (RetAI.getKind()) {
1807  case ABIArgInfo::Extend:
1808  if (RetTy->hasSignedIntegerRepresentation())
1809  RetAttrs.addAttribute(llvm::Attribute::SExt);
1810  else if (RetTy->hasUnsignedIntegerRepresentation())
1811  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1812  // FALL THROUGH
1813  case ABIArgInfo::Direct:
1814  if (RetAI.getInReg())
1815  RetAttrs.addAttribute(llvm::Attribute::InReg);
1816  break;
1817  case ABIArgInfo::Ignore:
1818  break;
1819 
1820  case ABIArgInfo::InAlloca:
1821  case ABIArgInfo::Indirect: {
1822  // inalloca and sret disable readnone and readonly
1823  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1824  .removeAttribute(llvm::Attribute::ReadNone);
1825  break;
1826  }
1827 
1828  case ABIArgInfo::CoerceAndExpand:
1829  break;
1830 
1831  case ABIArgInfo::Expand:
1832  llvm_unreachable("Invalid ABI kind for return argument");
1833  }
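// As an illustration of the Extend case above: a function returning
// 'signed char' is marked so callers know the register is sign-extended,
// e.g. roughly
//
//   define signext i8 @f()
//
// while an 'unsigned short' return would be marked 'zeroext' instead.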
1834 
1835  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1836  QualType PTy = RefTy->getPointeeType();
1837  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1838  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1839  .getQuantity());
1840  else if (getContext().getTargetAddressSpace(PTy) == 0)
1841  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1842  }
1843 
1844  // Attach return attributes.
1845  if (RetAttrs.hasAttributes()) {
1846  PAL.push_back(llvm::AttributeSet::get(
1847  getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1848  }
1849 
1850  bool hasUsedSRet = false;
1851 
1852  // Attach attributes to sret.
1853  if (IRFunctionArgs.hasSRetArg()) {
1854  llvm::AttrBuilder SRETAttrs;
1855  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1856  hasUsedSRet = true;
1857  if (RetAI.getInReg())
1858  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1859  PAL.push_back(llvm::AttributeSet::get(
1860  getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1861  }
1862 
1863  // Attach attributes to inalloca argument.
1864  if (IRFunctionArgs.hasInallocaArg()) {
1865  llvm::AttrBuilder Attrs;
1866  Attrs.addAttribute(llvm::Attribute::InAlloca);
1867  PAL.push_back(llvm::AttributeSet::get(
1868  getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1869  }
1870 
1871  unsigned ArgNo = 0;
1872  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1873  E = FI.arg_end();
1874  I != E; ++I, ++ArgNo) {
1875  QualType ParamType = I->type;
1876  const ABIArgInfo &AI = I->info;
1877  llvm::AttrBuilder Attrs;
1878 
1879  // Add attribute for padding argument, if necessary.
1880  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1881  if (AI.getPaddingInReg())
1882  PAL.push_back(llvm::AttributeSet::get(
1883  getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1884  llvm::Attribute::InReg));
1885  }
1886 
1887  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1888  // have the corresponding parameter variable. It doesn't make
1889  // sense to do it here because parameters are so messed up.
1890  switch (AI.getKind()) {
1891  case ABIArgInfo::Extend:
1892  if (ParamType->isSignedIntegerOrEnumerationType())
1893  Attrs.addAttribute(llvm::Attribute::SExt);
1894  else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1895  if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1896  Attrs.addAttribute(llvm::Attribute::SExt);
1897  else
1898  Attrs.addAttribute(llvm::Attribute::ZExt);
1899  }
1900  // FALL THROUGH
1901  case ABIArgInfo::Direct:
1902  if (ArgNo == 0 && FI.isChainCall())
1903  Attrs.addAttribute(llvm::Attribute::Nest);
1904  else if (AI.getInReg())
1905  Attrs.addAttribute(llvm::Attribute::InReg);
1906  break;
1907 
1908  case ABIArgInfo::Indirect: {
1909  if (AI.getInReg())
1910  Attrs.addAttribute(llvm::Attribute::InReg);
1911 
1912  if (AI.getIndirectByVal())
1913  Attrs.addAttribute(llvm::Attribute::ByVal);
1914 
1915  CharUnits Align = AI.getIndirectAlign();
1916 
1917  // In a byval argument, it is important that the required
1918  // alignment of the type is honored, as LLVM might be creating a
1919  // *new* stack object, and needs to know what alignment to give
1920  // it. (Sometimes it can deduce a sensible alignment on its own,
1921  // but not if clang decides it must emit a packed struct, or the
1922  // user specifies increased alignment requirements.)
1923  //
1924  // This is different from indirect *not* byval, where the object
1925  // exists already, and the align attribute is purely
1926  // informative.
1927  assert(!Align.isZero());
1928 
1929  // For now, only add this when we have a byval argument.
1930  // TODO: be less lazy about updating test cases.
1931  if (AI.getIndirectByVal())
1932  Attrs.addAlignmentAttr(Align.getQuantity());
1933 
1934  // byval disables readnone and readonly.
1935  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1936  .removeAttribute(llvm::Attribute::ReadNone);
1937  break;
1938  }
1939  case ABIArgInfo::Ignore:
1940  case ABIArgInfo::Expand:
1941  case ABIArgInfo::CoerceAndExpand:
1942  break;
1943 
1944  case ABIArgInfo::InAlloca:
1945  // inalloca disables readnone and readonly.
1946  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1947  .removeAttribute(llvm::Attribute::ReadNone);
1948  continue;
1949  }
1950 
1951  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1952  QualType PTy = RefTy->getPointeeType();
1953  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1954  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1955  .getQuantity());
1956  else if (getContext().getTargetAddressSpace(PTy) == 0)
1957  Attrs.addAttribute(llvm::Attribute::NonNull);
1958  }
1959 
1960  switch (FI.getExtParameterInfo(ArgNo).getABI()) {
1961  case ParameterABI::Ordinary:
1962  break;
1963 
1964  case ParameterABI::SwiftIndirectResult: {
1965  // Add 'sret' if we haven't already used it for something, but
1966  // only if the result is void.
1967  if (!hasUsedSRet && RetTy->isVoidType()) {
1968  Attrs.addAttribute(llvm::Attribute::StructRet);
1969  hasUsedSRet = true;
1970  }
1971 
1972  // Add 'noalias' in either case.
1973  Attrs.addAttribute(llvm::Attribute::NoAlias);
1974 
1975  // Add 'dereferenceable' and 'alignment'.
1976  auto PTy = ParamType->getPointeeType();
1977  if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
1978  auto info = getContext().getTypeInfoInChars(PTy);
1979  Attrs.addDereferenceableAttr(info.first.getQuantity());
1980  Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
1981  info.second.getQuantity()));
1982  }
1983  break;
1984  }
1985 
1986  case ParameterABI::SwiftErrorResult:
1987  Attrs.addAttribute(llvm::Attribute::SwiftError);
1988  break;
1989 
1990  case ParameterABI::SwiftContext:
1991  Attrs.addAttribute(llvm::Attribute::SwiftSelf);
1992  break;
1993  }
1994 
1995  if (Attrs.hasAttributes()) {
1996  unsigned FirstIRArg, NumIRArgs;
1997  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1998  for (unsigned i = 0; i < NumIRArgs; i++)
1999  PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
2000  FirstIRArg + i + 1, Attrs));
2001  }
2002  }
2003  assert(ArgNo == FI.arg_size());
2004 
2005  if (FuncAttrs.hasAttributes())
2006  PAL.push_back(llvm::
2007  AttributeSet::get(getLLVMContext(),
2008  llvm::AttributeSet::FunctionIndex,
2009  FuncAttrs));
2010 }
2011 
2012 /// An argument came in as a promoted argument; demote it back to its
2013 /// declared type.
2014 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2015  const VarDecl *var,
2016  llvm::Value *value) {
2017  llvm::Type *varType = CGF.ConvertType(var->getType());
2018 
2019  // This can happen with promotions that actually don't change the
2020  // underlying type, like the enum promotions.
2021  if (value->getType() == varType) return value;
2022 
2023  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2024  && "unexpected promotion type");
2025 
2026  if (isa<llvm::IntegerType>(varType))
2027  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2028 
2029  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2030 }
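// Illustrative example: for a K&R-style definition such as
//
//   float f(x) float x; { return x; }
//
// the caller passes 'x' promoted to double, so the prolog demotes it back
// with the FP cast above (an fptrunc); promoted integers are demoted with a
// trunc instead.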
2031 
2032 /// Returns the attribute (either parameter attribute, or function
2033 /// attribute), which declares argument ArgNo to be non-null.
2034 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2035  QualType ArgType, unsigned ArgNo) {
2036  // FIXME: __attribute__((nonnull)) can also be applied to:
2037  // - references to pointers, where the pointee is known to be
2038  // nonnull (apparently a Clang extension)
2039  // - transparent unions containing pointers
2040  // In the former case, LLVM IR cannot represent the constraint. In
2041  // the latter case, we have no guarantee that the transparent union
2042  // is in fact passed as a pointer.
2043  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2044  return nullptr;
2045  // First, check attribute on parameter itself.
2046  if (PVD) {
2047  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2048  return ParmNNAttr;
2049  }
2050  // Check function attributes.
2051  if (!FD)
2052  return nullptr;
2053  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2054  if (NNAttr->isNonNull(ArgNo))
2055  return NNAttr;
2056  }
2057  return nullptr;
2058 }
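// For example, either spelling below marks the first argument non-null here:
//
//   void f(void *p) __attribute__((nonnull(1)));   // function attribute
//   void g(void *p __attribute__((nonnull)));      // parameter attribute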
2059 
2060 namespace {
2061  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2062  Address Temp;
2063  Address Arg;
2064  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2065  void Emit(CodeGenFunction &CGF, Flags flags) override {
2066  llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2067  CGF.Builder.CreateStore(errorValue, Arg);
2068  }
2069  };
2070 }
2071 
2072 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2073  llvm::Function *Fn,
2074  const FunctionArgList &Args) {
2075  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2076  // Naked functions don't have prologues.
2077  return;
2078 
2079  // If this is an implicit-return-zero function, go ahead and
2080  // initialize the return value. TODO: it might be nice to have
2081  // a more general mechanism for this that didn't require synthesized
2082  // return statements.
2083  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2084  if (FD->hasImplicitReturnZero()) {
2085  QualType RetTy = FD->getReturnType().getUnqualifiedType();
2086  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2087  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2088  Builder.CreateStore(Zero, ReturnValue);
2089  }
2090  }
2091 
2092  // FIXME: We no longer need the types from FunctionArgList; lift up and
2093  // simplify.
2094 
2095  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2096  // Flattened function arguments.
2097  SmallVector<llvm::Value *, 16> FnArgs;
2098  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2099  for (auto &Arg : Fn->args()) {
2100  FnArgs.push_back(&Arg);
2101  }
2102  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2103 
2104  // If we're using inalloca, all the memory arguments are GEPs off of the last
2105  // parameter, which is a pointer to the complete memory area.
2106  Address ArgStruct = Address::invalid();
2107  const llvm::StructLayout *ArgStructLayout = nullptr;
2108  if (IRFunctionArgs.hasInallocaArg()) {
2109  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2110  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2111  FI.getArgStructAlignment());
2112 
2113  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2114  }
2115 
2116  // Name the struct return parameter.
2117  if (IRFunctionArgs.hasSRetArg()) {
2118  auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2119  AI->setName("agg.result");
2120  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
2121  llvm::Attribute::NoAlias));
2122  }
2123 
2124  // Track if we received the parameter as a pointer (indirect, byval, or
2125  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
2126  // it into a local alloca for us.
2127  SmallVector<ParamValue, 16> ArgVals;
2128  ArgVals.reserve(Args.size());
2129 
2130  // Create a pointer value for every parameter declaration. This usually
2131  // entails copying one or more LLVM IR arguments into an alloca. Don't push
2132  // any cleanups or do anything that might unwind. We do that separately, so
2133  // we can push the cleanups in the correct order for the ABI.
2134  assert(FI.arg_size() == Args.size() &&
2135  "Mismatch between function signature & arguments.");
2136  unsigned ArgNo = 0;
2137  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2138  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2139  i != e; ++i, ++info_it, ++ArgNo) {
2140  const VarDecl *Arg = *i;
2141  QualType Ty = info_it->type;
2142  const ABIArgInfo &ArgI = info_it->info;
2143 
2144  bool isPromoted =
2145  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2146 
2147  unsigned FirstIRArg, NumIRArgs;
2148  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2149 
2150  switch (ArgI.getKind()) {
2151  case ABIArgInfo::InAlloca: {
2152  assert(NumIRArgs == 0);
2153  auto FieldIndex = ArgI.getInAllocaFieldIndex();
2154  CharUnits FieldOffset =
2155  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2156  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2157  Arg->getName());
2158  ArgVals.push_back(ParamValue::forIndirect(V));
2159  break;
2160  }
2161 
2162  case ABIArgInfo::Indirect: {
2163  assert(NumIRArgs == 1);
2164  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2165 
2166  if (!hasScalarEvaluationKind(Ty)) {
2167  // Aggregates and complex variables are accessed by reference. All we
2168  // need to do is realign the value, if requested.
2169  Address V = ParamAddr;
2170  if (ArgI.getIndirectRealign()) {
2171  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2172 
2173  // Copy from the incoming argument pointer to the temporary with the
2174  // appropriate alignment.
2175  //
2176  // FIXME: We should have a common utility for generating an aggregate
2177  // copy.
2178  CharUnits Size = getContext().getTypeSizeInChars(Ty);
2179  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2180  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2181  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2182  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2183  V = AlignedTemp;
2184  }
2185  ArgVals.push_back(ParamValue::forIndirect(V));
2186  } else {
2187  // Load scalar value from indirect argument.
2188  llvm::Value *V =
2189  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2190 
2191  if (isPromoted)
2192  V = emitArgumentDemotion(*this, Arg, V);
2193  ArgVals.push_back(ParamValue::forDirect(V));
2194  }
2195  break;
2196  }
2197 
2198  case ABIArgInfo::Extend:
2199  case ABIArgInfo::Direct: {
2200 
2201  // If we have the trivial case, handle it with no muss and fuss.
2202  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2203  ArgI.getCoerceToType() == ConvertType(Ty) &&
2204  ArgI.getDirectOffset() == 0) {
2205  assert(NumIRArgs == 1);
2206  llvm::Value *V = FnArgs[FirstIRArg];
2207  auto AI = cast<llvm::Argument>(V);
2208 
2209  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2210  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2211  PVD->getFunctionScopeIndex()))
2212  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2213  AI->getArgNo() + 1,
2214  llvm::Attribute::NonNull));
2215 
2216  QualType OTy = PVD->getOriginalType();
2217  if (const auto *ArrTy =
2218  getContext().getAsConstantArrayType(OTy)) {
2219  // A C99 array parameter declaration with the static keyword also
2220  // indicates dereferenceability, and if the size is constant we can
2221  // use the dereferenceable attribute (which requires the size in
2222  // bytes).
2223  if (ArrTy->getSizeModifier() == ArrayType::Static) {
2224  QualType ETy = ArrTy->getElementType();
2225  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2226  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2227  ArrSize) {
2228  llvm::AttrBuilder Attrs;
2229  Attrs.addDereferenceableAttr(
2230  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2231  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2232  AI->getArgNo() + 1, Attrs));
2233  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2234  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2235  AI->getArgNo() + 1,
2236  llvm::Attribute::NonNull));
2237  }
2238  }
2239  } else if (const auto *ArrTy =
2240  getContext().getAsVariableArrayType(OTy)) {
2241  // For C99 VLAs with the static keyword, we don't know the size so
2242  // we can't use the dereferenceable attribute, but in addrspace(0)
2243  // we know that it must be nonnull.
2244  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2245  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2246  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2247  AI->getArgNo() + 1,
2248  llvm::Attribute::NonNull));
2249  }
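// Illustrative example of the C99 'static' array parameter handling above
// (assuming 4-byte int): for
//
//   void f(int x[static 4]);
//
// the minimum extent is a compile-time constant, so 'x' can be tagged
//
//   define void @f(i32* dereferenceable(16) %x)
//
// whereas 'void g(int n, int x[static n])' only yields 'nonnull' in
// address space 0.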
2250 
2251  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2252  if (!AVAttr)
2253  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2254  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2255  if (AVAttr) {
2256  llvm::Value *AlignmentValue =
2257  EmitScalarExpr(AVAttr->getAlignment());
2258  llvm::ConstantInt *AlignmentCI =
2259  cast<llvm::ConstantInt>(AlignmentValue);
2260  unsigned Alignment =
2261  std::min((unsigned) AlignmentCI->getZExtValue(),
2262  +llvm::Value::MaximumAlignment);
2263 
2264  llvm::AttrBuilder Attrs;
2265  Attrs.addAlignmentAttr(Alignment);
2266  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2267  AI->getArgNo() + 1, Attrs));
2268  }
2269  }
2270 
2271  if (Arg->getType().isRestrictQualified())
2272  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2273  AI->getArgNo() + 1,
2274  llvm::Attribute::NoAlias));
2275 
2276  // LLVM expects swifterror parameters to be used in very restricted
2277  // ways. Copy the value into a less-restricted temporary.
2278  if (FI.getExtParameterInfo(ArgNo).getABI()
2279  == ParameterABI::SwiftErrorResult) {
2280  QualType pointeeTy = Ty->getPointeeType();
2281  assert(pointeeTy->isPointerType());
2282  Address temp =
2283  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2284  Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2285  llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2286  Builder.CreateStore(incomingErrorValue, temp);
2287  V = temp.getPointer();
2288 
2289  // Push a cleanup to copy the value back at the end of the function.
2290  // The convention does not guarantee that the value will be written
2291  // back if the function exits with an unwind exception.
2292  EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2293  }
2294 
2295  // Ensure the argument is the correct type.
2296  if (V->getType() != ArgI.getCoerceToType())
2297  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2298 
2299  if (isPromoted)
2300  V = emitArgumentDemotion(*this, Arg, V);
2301 
2302  if (const CXXMethodDecl *MD =
2303  dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
2304  if (MD->isVirtual() && Arg == CXXABIThisDecl)
2305  V = CGM.getCXXABI().
2306  adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
2307  }
2308 
2309  // Because of merging of function types from multiple decls it is
2310  // possible for the type of an argument to not match the corresponding
2311  // type in the function type. Since we are codegening the callee
2312  // in here, add a cast to the argument type.
2313  llvm::Type *LTy = ConvertType(Arg->getType());
2314  if (V->getType() != LTy)
2315  V = Builder.CreateBitCast(V, LTy);
2316 
2317  ArgVals.push_back(ParamValue::forDirect(V));
2318  break;
2319  }
2320 
2321  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2322  Arg->getName());
2323 
2324  // Pointer to store into.
2325  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2326 
2327  // Fast-isel and the optimizer generally like scalar values better than
2328  // FCAs, so we flatten them if this is safe to do for this argument.
2329  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2330  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2331  STy->getNumElements() > 1) {
2332  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2333  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2334  llvm::Type *DstTy = Ptr.getElementType();
2335  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2336 
2337  Address AddrToStoreInto = Address::invalid();
2338  if (SrcSize <= DstSize) {
2339  AddrToStoreInto =
2340  Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2341  } else {
2342  AddrToStoreInto =
2343  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2344  }
2345 
2346  assert(STy->getNumElements() == NumIRArgs);
2347  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2348  auto AI = FnArgs[FirstIRArg + i];
2349  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2350  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2351  Address EltPtr =
2352  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2353  Builder.CreateStore(AI, EltPtr);
2354  }
2355 
2356  if (SrcSize > DstSize) {
2357  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2358  }
2359 
2360  } else {
2361  // Simple case, just do a coerced store of the argument into the alloca.
2362  assert(NumIRArgs == 1);
2363  auto AI = FnArgs[FirstIRArg];
2364  AI->setName(Arg->getName() + ".coerce");
2365  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2366  }
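// Sketch of the flattening above, assuming the x86-64 SysV ABI and a
// hypothetical 'struct S { double x, y; }': the ABI may coerce 'S' to the
// IR type '{ double, double }', so the function receives two scalar
// arguments named 's.coerce0' and 's.coerce1', which are stored
// element-by-element into the local alloca for 's'.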
2367 
2368  // Match to what EmitParmDecl is expecting for this type.
2369  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2370  llvm::Value *V =
2371  EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2372  if (isPromoted)
2373  V = emitArgumentDemotion(*this, Arg, V);
2374  ArgVals.push_back(ParamValue::forDirect(V));
2375  } else {
2376  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2377  }
2378  break;
2379  }
2380 
2381  case ABIArgInfo::CoerceAndExpand: {
2382  // Reconstruct into a temporary.
2383  Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2384  ArgVals.push_back(ParamValue::forIndirect(alloca));
2385 
2386  auto coercionType = ArgI.getCoerceAndExpandType();
2387  alloca = Builder.CreateElementBitCast(alloca, coercionType);
2388  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2389 
2390  unsigned argIndex = FirstIRArg;
2391  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2392  llvm::Type *eltType = coercionType->getElementType(i);
2393  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2394  continue;
2395 
2396  auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2397  auto elt = FnArgs[argIndex++];
2398  Builder.CreateStore(elt, eltAddr);
2399  }
2400  assert(argIndex == FirstIRArg + NumIRArgs);
2401  break;
2402  }
2403 
2404  case ABIArgInfo::Expand: {
2405  // If this structure was expanded into multiple arguments then
2406  // we need to create a temporary and reconstruct it from the
2407  // arguments.
2408  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2409  LValue LV = MakeAddrLValue(Alloca, Ty);
2410  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2411 
2412  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2413  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2414  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2415  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2416  auto AI = FnArgs[FirstIRArg + i];
2417  AI->setName(Arg->getName() + "." + Twine(i));
2418  }
2419  break;
2420  }
2421 
2422  case ABIArgInfo::Ignore:
2423  assert(NumIRArgs == 0);
2424  // Initialize the local variable appropriately.
2425  if (!hasScalarEvaluationKind(Ty)) {
2426  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2427  } else {
2428  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2429  ArgVals.push_back(ParamValue::forDirect(U));
2430  }
2431  break;
2432  }
2433  }
2434 
2435  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2436  for (int I = Args.size() - 1; I >= 0; --I)
2437  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2438  } else {
2439  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2440  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2441  }
2442 }
2443 
2444 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2445  while (insn->use_empty()) {
2446  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2447  if (!bitcast) return;
2448 
2449  // This is "safe" because we would have used a ConstantExpr otherwise.
2450  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2451  bitcast->eraseFromParent();
2452  }
2453 }
2454 
2455 /// Try to emit a fused autorelease of a return result.
2456 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2457  llvm::Value *result) {
2458  // We must be immediately following the cast.
2459  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2460  if (BB->empty()) return nullptr;
2461  if (&BB->back() != result) return nullptr;
2462 
2463  llvm::Type *resultType = result->getType();
2464 
2465  // result is in a BasicBlock and is therefore an Instruction.
2466  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2467 
2469 
2470  // Look for:
2471  // %generator = bitcast %type1* %generator2 to %type2*
2472  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2473  // We would have emitted this as a constant if the operand weren't
2474  // an Instruction.
2475  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2476 
2477  // Require the generator to be immediately followed by the cast.
2478  if (generator->getNextNode() != bitcast)
2479  return nullptr;
2480 
2481  insnsToKill.push_back(bitcast);
2482  }
2483 
2484  // Look for:
2485  // %generator = call i8* @objc_retain(i8* %originalResult)
2486  // or
2487  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2488  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2489  if (!call) return nullptr;
2490 
2491  bool doRetainAutorelease;
2492 
2493  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2494  doRetainAutorelease = true;
2495  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2496  .objc_retainAutoreleasedReturnValue) {
2497  doRetainAutorelease = false;
2498 
2499  // If we emitted an assembly marker for this call (and the
2500  // ARCEntrypoints field should have been set if so), go looking
2501  // for that call. If we can't find it, we can't do this
2502  // optimization. But it should always be the immediately previous
2503  // instruction, unless we needed bitcasts around the call.
2504  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2505  llvm::Instruction *prev = call->getPrevNode();
2506  assert(prev);
2507  if (isa<llvm::BitCastInst>(prev)) {
2508  prev = prev->getPrevNode();
2509  assert(prev);
2510  }
2511  assert(isa<llvm::CallInst>(prev));
2512  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2513  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2514  insnsToKill.push_back(prev);
2515  }
2516  } else {
2517  return nullptr;
2518  }
2519 
2520  result = call->getArgOperand(0);
2521  insnsToKill.push_back(call);
2522 
2523  // Keep killing bitcasts, for sanity. Note that we no longer care
2524  // about precise ordering as long as there's exactly one use.
2525  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2526  if (!bitcast->hasOneUse()) break;
2527  insnsToKill.push_back(bitcast);
2528  result = bitcast->getOperand(0);
2529  }
2530 
2531  // Delete all the unnecessary instructions, from latest to earliest.
2532  for (SmallVectorImpl<llvm::Instruction*>::iterator
2533  i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2534  (*i)->eraseFromParent();
2535 
2536  // Do the fused retain/autorelease if we were asked to.
2537  if (doRetainAutorelease)
2538  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2539 
2540  // Cast back to the result type.
2541  return CGF.Builder.CreateBitCast(result, resultType);
2542 }
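// Roughly, for the plain objc_retain case this rewrites the return sequence
//
//   %1 = call i8* @objc_retain(i8* %0)
//   ret i8* %1
//
// into a single fused call
//
//   %1 = call i8* @objc_retainAutoreleaseReturnValue(i8* %0)
//   ret i8* %1
//
// avoiding a separate autorelease on the return path.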
2543 
2544 /// If this is a +1 of the value of an immutable 'self', remove it.
2545 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2546  llvm::Value *result) {
2547  // This is only applicable to a method with an immutable 'self'.
2548  const ObjCMethodDecl *method =
2549  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2550  if (!method) return nullptr;
2551  const VarDecl *self = method->getSelfDecl();
2552  if (!self->getType().isConstQualified()) return nullptr;
2553 
2554  // Look for a retain call.
2555  llvm::CallInst *retainCall =
2556  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2557  if (!retainCall ||
2558  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2559  return nullptr;
2560 
2561  // Look for an ordinary load of 'self'.
2562  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2563  llvm::LoadInst *load =
2564  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2565  if (!load || load->isAtomic() || load->isVolatile() ||
2566  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2567  return nullptr;
2568 
2569  // Okay! Burn it all down. This relies for correctness on the
2570  // assumption that the retain is emitted as part of the return and
2571  // that thereafter everything is used "linearly".
2572  llvm::Type *resultType = result->getType();
2573  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2574  assert(retainCall->use_empty());
2575  retainCall->eraseFromParent();
2576  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2577 
2578  return CGF.Builder.CreateBitCast(load, resultType);
2579 }
2580 
2581 /// Emit an ARC autorelease of the result of a function.
2582 ///
2583 /// \return the value to actually return from the function
2584 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2585  llvm::Value *result) {
2586  // If we're returning 'self', kill the initial retain. This is a
2587  // heuristic attempt to "encourage correctness" in the really unfortunate
2588  // case where we have a return of self during a dealloc and we desperately
2589  // need to avoid the possible autorelease.
2590  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2591  return self;
2592 
2593  // At -O0, try to emit a fused retain/autorelease.
2594  if (CGF.shouldUseFusedARCCalls())
2595  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2596  return fused;
2597 
2598  return CGF.EmitARCAutoreleaseReturnValue(result);
2599 }
2600 
2601 /// Heuristically search for a dominating store to the return-value slot.
2602 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2604  // Check if a User is a store whose pointer operand is the ReturnValue.
2604  // We are looking for stores to the ReturnValue, not for stores of the
2605  // ReturnValue to some other location.
2606  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2607  auto *SI = dyn_cast<llvm::StoreInst>(U);
2608  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2609  return nullptr;
2610  // These aren't actually possible for non-coerced returns, and we
2611  // only care about non-coerced returns on this code path.
2612  assert(!SI->isAtomic() && !SI->isVolatile());
2613  return SI;
2614  };
2615  // If there are multiple uses of the return-value slot, just check
2616  // for something immediately preceding the IP. Sometimes this can
2617  // happen with how we generate implicit-returns; it can also happen
2618  // with noreturn cleanups.
2619  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2620  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2621  if (IP->empty()) return nullptr;
2622  llvm::Instruction *I = &IP->back();
2623 
2624  // Skip lifetime markers
2625  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2626  IE = IP->rend();
2627  II != IE; ++II) {
2628  if (llvm::IntrinsicInst *Intrinsic =
2629  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2630  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2631  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2632  ++II;
2633  if (II == IE)
2634  break;
2635  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2636  continue;
2637  }
2638  }
2639  I = &*II;
2640  break;
2641  }
2642 
2643  return GetStoreIfValid(I);
2644  }
2645 
2646  llvm::StoreInst *store =
2647  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2648  if (!store) return nullptr;
2649 
2650  // Now do a first-and-dirty dominance check: just walk up the
2651  // single-predecessors chain from the current insertion point.
2652  llvm::BasicBlock *StoreBB = store->getParent();
2653  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2654  while (IP != StoreBB) {
2655  if (!(IP = IP->getSinglePredecessor()))
2656  return nullptr;
2657  }
2658 
2659  // Okay, the store's basic block dominates the insertion point; we
2660  // can do our thing.
2661  return store;
2662 }
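// For example, for 'int f() { return 42; }' the body emits
//
//   store i32 42, i32* %retval
//
// and the epilog, finding that store to dominate the return, forwards the
// value straight into 'ret i32 42', deleting the store and usually the
// now-unused %retval alloca.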
2663 
2664 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2665  bool EmitRetDbgLoc,
2666  SourceLocation EndLoc) {
2667  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2668  // Naked functions don't have epilogues.
2669  Builder.CreateUnreachable();
2670  return;
2671  }
2672 
2673  // Functions with no result always return void.
2674  if (!ReturnValue.isValid()) {
2675  Builder.CreateRetVoid();
2676  return;
2677  }
2678 
2679  llvm::DebugLoc RetDbgLoc;
2680  llvm::Value *RV = nullptr;
2681  QualType RetTy = FI.getReturnType();
2682  const ABIArgInfo &RetAI = FI.getReturnInfo();
2683 
2684  switch (RetAI.getKind()) {
2685  case ABIArgInfo::InAlloca:
2686  // Aggregates get evaluated directly into the destination. Sometimes we
2687  // need to return the sret value in a register, though.
2688  assert(hasAggregateEvaluationKind(RetTy));
2689  if (RetAI.getInAllocaSRet()) {
2690  llvm::Function::arg_iterator EI = CurFn->arg_end();
2691  --EI;
2692  llvm::Value *ArgStruct = &*EI;
2693  llvm::Value *SRet = Builder.CreateStructGEP(
2694  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2695  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2696  }
2697  break;
2698 
2699  case ABIArgInfo::Indirect: {
2700  auto AI = CurFn->arg_begin();
2701  if (RetAI.isSRetAfterThis())
2702  ++AI;
2703  switch (getEvaluationKind(RetTy)) {
2704  case TEK_Complex: {
2705  ComplexPairTy RT =
2706  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2707  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2708  /*isInit*/ true);
2709  break;
2710  }
2711  case TEK_Aggregate:
2712  // Do nothing; aggregates get evaluated directly into the destination.
2713  break;
2714  case TEK_Scalar:
2715  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2716  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2717  /*isInit*/ true);
2718  break;
2719  }
2720  break;
2721  }
2722 
2723  case ABIArgInfo::Extend:
2724  case ABIArgInfo::Direct:
2725  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2726  RetAI.getDirectOffset() == 0) {
2727  // The internal return value temp always will have pointer-to-return-type
2728  // type, just do a load.
2729 
2730  // If there is a dominating store to ReturnValue, we can elide
2731  // the load, zap the store, and usually zap the alloca.
2732  if (llvm::StoreInst *SI =
2733  findDominatingStoreToReturnValue(*this)) {
2734  // Reuse the debug location from the store unless there is
2735  // cleanup code to be emitted between the store and return
2736  // instruction.
2737  if (EmitRetDbgLoc && !AutoreleaseResult)
2738  RetDbgLoc = SI->getDebugLoc();
2739  // Get the stored value and nuke the now-dead store.
2740  RV = SI->getValueOperand();
2741  SI->eraseFromParent();
2742 
2743  // If that was the only use of the return value, nuke it as well now.
2744  auto returnValueInst = ReturnValue.getPointer();
2745  if (returnValueInst->use_empty()) {
2746  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2747  alloca->eraseFromParent();
2748  ReturnValue = Address::invalid();
2749  }
2750  }
2751 
2752  // Otherwise, we have to do a simple load.
2753  } else {
2754  RV = Builder.CreateLoad(ReturnValue);
2755  }
2756  } else {
2757  // If the value is offset in memory, apply the offset now.
2758  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2759 
2760  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2761  }
2762 
2763  // In ARC, end functions that return a retainable type with a call
2764  // to objc_autoreleaseReturnValue.
2765  if (AutoreleaseResult) {
2766 #ifndef NDEBUG
2767  // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2768  // been stripped of the typedefs, so we cannot use RetTy here. Get the
2769  // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2770  // CurCodeDecl or BlockInfo.
2771  QualType RT;
2772 
2773  if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2774  RT = FD->getReturnType();
2775  else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2776  RT = MD->getReturnType();
2777  else if (isa<BlockDecl>(CurCodeDecl))
2778  RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2779  else
2780  llvm_unreachable("Unexpected function/method type");
2781 
2782  assert(getLangOpts().ObjCAutoRefCount &&
2783  !FI.isReturnsRetained() &&
2784  RT->isObjCRetainableType());
2785 #endif
2786  RV = emitAutoreleaseOfResult(*this, RV);
2787  }
2788 
2789  break;
2790 
2791  case ABIArgInfo::Ignore:
2792  break;
2793 
2794  case ABIArgInfo::CoerceAndExpand: {
2795  auto coercionType = RetAI.getCoerceAndExpandType();
2796  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2797 
2798  // Load all of the coerced elements out into results.
2799  SmallVector<llvm::Value*, 4> results;
2800  Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2801  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2802  auto coercedEltType = coercionType->getElementType(i);
2803  if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2804  continue;
2805 
2806  auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2807  auto elt = Builder.CreateLoad(eltAddr);
2808  results.push_back(elt);
2809  }
2810 
2811  // If we have one result, it's the single direct result type.
2812  if (results.size() == 1) {
2813  RV = results[0];
2814 
2815  // Otherwise, we need to make a first-class aggregate.
2816  } else {
2817  // Construct a return type that lacks padding elements.
2818  llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2819 
2820  RV = llvm::UndefValue::get(returnType);
2821  for (unsigned i = 0, e = results.size(); i != e; ++i) {
2822  RV = Builder.CreateInsertValue(RV, results[i], i);
2823  }
2824  }
2825  break;
2826  }
2827 
2828  case ABIArgInfo::Expand:
2829  llvm_unreachable("Invalid ABI kind for return argument");
2830  }
2831 
2832  llvm::Instruction *Ret;
2833  if (RV) {
2834  if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2835  if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
2836  SanitizerScope SanScope(this);
2837  llvm::Value *Cond = Builder.CreateICmpNE(
2838  RV, llvm::Constant::getNullValue(RV->getType()));
2839  llvm::Constant *StaticData[] = {
2840  EmitCheckSourceLocation(EndLoc),
2841  EmitCheckSourceLocation(RetNNAttr->getLocation()),
2842  };
2843  EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2844  "nonnull_return", StaticData, None);
2845  }
2846  }
2847  Ret = Builder.CreateRet(RV);
2848  } else {
2849  Ret = Builder.CreateRetVoid();
2850  }
2851 
2852  if (RetDbgLoc)
2853  Ret->setDebugLoc(std::move(RetDbgLoc));
2854 }
2855 
2856 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2857  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2858  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2859 }
2860 
2861 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2862  QualType Ty) {
2863  // FIXME: Generate IR in one pass, rather than going back and fixing up these
2864  // placeholders.
2865  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2866  llvm::Value *Placeholder =
2867  llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2868  Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
2869 
2870  // FIXME: When we generate this IR in one pass, we shouldn't need
2871  // this win32-specific alignment hack.
2872  CharUnits Align = CharUnits::fromQuantity(4);
2873 
2874  return AggValueSlot::forAddr(Address(Placeholder, Align),
2875  Ty.getQualifiers(),
2876  AggValueSlot::IsNotDestructed,
2877  AggValueSlot::DoesNotNeedGCBarriers,
2878  AggValueSlot::IsNotAliased);
2879 }
2880 
2881 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2882  const VarDecl *param,
2883  SourceLocation loc) {
2884  // StartFunction converted the ABI-lowered parameter(s) into a
2885  // local alloca. We need to turn that into an r-value suitable
2886  // for EmitCall.
2887  Address local = GetAddrOfLocalVar(param);
2888 
2889  QualType type = param->getType();
2890 
2891  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2892  "cannot emit delegate call arguments for inalloca arguments!");
2893 
2894  // For the most part, we just need to load the alloca, except that
2895  // aggregate r-values are actually pointers to temporaries.
2896  if (type->isReferenceType())
2897  args.add(RValue::get(Builder.CreateLoad(local)), type);
2898  else
2899  args.add(convertTempToRValue(local, type, loc), type);
2900 }
2901 
2902 static bool isProvablyNull(llvm::Value *addr) {
2903  return isa<llvm::ConstantPointerNull>(addr);
2904 }
2905 
2906 static bool isProvablyNonNull(llvm::Value *addr) {
2907  return isa<llvm::AllocaInst>(addr);
2908 }
2909 
2910 /// Emit the actual writing-back of a writeback.
2911 static void emitWriteback(CodeGenFunction &CGF,
2912  const CallArgList::Writeback &writeback) {
2913  const LValue &srcLV = writeback.Source;
2914  Address srcAddr = srcLV.getAddress();
2915  assert(!isProvablyNull(srcAddr.getPointer()) &&
2916  "shouldn't have writeback for provably null argument");
2917 
2918  llvm::BasicBlock *contBB = nullptr;
2919 
2920  // If the argument wasn't provably non-null, we need to null check
2921  // before doing the store.
2922  bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2923  if (!provablyNonNull) {
2924  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2925  contBB = CGF.createBasicBlock("icr.done");
2926 
2927  llvm::Value *isNull =
2928  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2929  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2930  CGF.EmitBlock(writebackBB);
2931  }
2932 
2933  // Load the value to writeback.
2934  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2935 
2936  // Cast it back, in case we're writing an id to a Foo* or something.
2937  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2938  "icr.writeback-cast");
2939 
2940  // Perform the writeback.
2941 
2942  // If we have a "to use" value, it's something we need to emit a use
2943  // of. This has to be carefully threaded in: if it's done after the
2944  // release it's potentially undefined behavior (and the optimizer
2945  // will ignore it), and if it happens before the retain then the
2946  // optimizer could move the release there.
2947  if (writeback.ToUse) {
2948  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2949 
2950  // Retain the new value. No need to block-copy here: the block's
2951  // being passed up the stack.
2952  value = CGF.EmitARCRetainNonBlock(value);
2953 
2954  // Emit the intrinsic use here.
2955  CGF.EmitARCIntrinsicUse(writeback.ToUse);
2956 
2957  // Load the old value (primitively).
2958  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2959 
2960  // Put the new value in place (primitively).
2961  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2962 
2963  // Release the old value.
2964  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2965 
2966  // Otherwise, we can just do a normal lvalue store.
2967  } else {
2968  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2969  }
2970 
2971  // Jump to the continuation block.
2972  if (!provablyNonNull)
2973  CGF.EmitBlock(contBB);
2974 }
2975 
2976 static void emitWritebacks(CodeGenFunction &CGF,
2977  const CallArgList &args) {
2978  for (const auto &I : args.writebacks())
2979  emitWriteback(CGF, I);
2980 }
2981 
2982 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2983  const CallArgList &CallArgs) {
2984  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2986  CallArgs.getCleanupsToDeactivate();
2987  // Iterate in reverse to increase the likelihood of popping the cleanup.
2988  for (const auto &I : llvm::reverse(Cleanups)) {
2989  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
2990  I.IsActiveIP->eraseFromParent();
2991  }
2992 }
2993 
2994 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2995  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2996  if (uop->getOpcode() == UO_AddrOf)
2997  return uop->getSubExpr();
2998  return nullptr;
2999 }
3000 
3001 /// Emit an argument that's being passed call-by-writeback. That is,
3002 /// we are passing the address of an __autoreleased temporary; it
3003 /// might be copy-initialized with the current value of the given
3004 /// address, but it will definitely be copied out of after the call.
3005 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3006  const ObjCIndirectCopyRestoreExpr *CRE) {
3007  LValue srcLV;
3008 
3009  // Make an optimistic effort to emit the address as an l-value.
3010  // This can fail if the argument expression is more complicated.
3011  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3012  srcLV = CGF.EmitLValue(lvExpr);
3013 
3014  // Otherwise, just emit it as a scalar.
3015  } else {
3016  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3017 
3018  QualType srcAddrType =
3019  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3020  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3021  }
3022  Address srcAddr = srcLV.getAddress();
3023 
3024  // The dest and src types don't necessarily match in LLVM terms
3025  // because of the crazy ObjC compatibility rules.
3026 
3027  llvm::PointerType *destType =
3028  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3029 
3030  // If the address is a constant null, just pass the appropriate null.
3031  if (isProvablyNull(srcAddr.getPointer())) {
3032  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3033  CRE->getType());
3034  return;
3035  }
3036 
3037  // Create the temporary.
3038  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3039  CGF.getPointerAlign(),
3040  "icr.temp");
3041  // Loading an l-value can introduce a cleanup if the l-value is __weak,
3042  // and that cleanup will be conditional if we can't prove that the l-value
3043  // isn't null, so we need to register a dominating point so that the cleanups
3044  // system will make valid IR.
3045  CodeGenFunction::ConditionalEvaluation condEval(CGF);
3046 
3047  // Zero-initialize it if we're not doing a copy-initialization.
3048  bool shouldCopy = CRE->shouldCopy();
3049  if (!shouldCopy) {
3050  llvm::Value *null =
3051  llvm::ConstantPointerNull::get(
3052  cast<llvm::PointerType>(destType->getElementType()));
3053  CGF.Builder.CreateStore(null, temp);
3054  }
3055 
3056  llvm::BasicBlock *contBB = nullptr;
3057  llvm::BasicBlock *originBB = nullptr;
3058 
3059  // If the address is *not* known to be non-null, we need to switch.
3060  llvm::Value *finalArgument;
3061 
3062  bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
3063  if (provablyNonNull) {
3064  finalArgument = temp.getPointer();
3065  } else {
3066  llvm::Value *isNull =
3067  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3068 
3069  finalArgument = CGF.Builder.CreateSelect(isNull,
3070  llvm::ConstantPointerNull::get(destType),
3071  temp.getPointer(), "icr.argument");
3072 
3073  // If we need to copy, then the load has to be conditional, which
3074  // means we need control flow.
3075  if (shouldCopy) {
3076  originBB = CGF.Builder.GetInsertBlock();
3077  contBB = CGF.createBasicBlock("icr.cont");
3078  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3079  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3080  CGF.EmitBlock(copyBB);
3081  condEval.begin(CGF);
3082  }
3083  }
3084 
3085  llvm::Value *valueToUse = nullptr;
3086 
3087  // Perform a copy if necessary.
3088  if (shouldCopy) {
3089  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3090  assert(srcRV.isScalar());
3091 
3092  llvm::Value *src = srcRV.getScalarVal();
3093  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3094  "icr.cast");
3095 
3096  // Use an ordinary store, not a store-to-lvalue.
3097  CGF.Builder.CreateStore(src, temp);
3098 
3099  // If optimization is enabled, and the value was held in a
3100  // __strong variable, we need to tell the optimizer that this
3101  // value has to stay alive until we're doing the store back.
3102  // This is because the temporary is effectively unretained,
3103  // and so otherwise we can violate the high-level semantics.
3104  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3105  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3106  valueToUse = src;
3107  }
3108  }
3109 
3110  // Finish the control flow if we needed it.
3111  if (shouldCopy && !provablyNonNull) {
3112  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3113  CGF.EmitBlock(contBB);
3114 
3115  // Make a phi for the value to intrinsically use.
3116  if (valueToUse) {
3117  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3118  "icr.to-use");
3119  phiToUse->addIncoming(valueToUse, copyBB);
3120  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3121  originBB);
3122  valueToUse = phiToUse;
3123  }
3124 
3125  condEval.end(CGF);
3126  }
3127 
3128  args.addWriteback(srcLV, temp, valueToUse);
3129  args.add(RValue::get(finalArgument), CRE->getType());
3130 }
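// Illustrative ObjC example of the writeback sequence above:
//
//   NSError *err;        // __strong local
//   [obj doThing:&err];  // parameter typed NSError * __autoreleasing *
//
// the callee receives the address of 'icr.temp' (null-checked if the source
// address isn't provably non-null), and after the call the temporary's value
// is written back into 'err'.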
3131 
3132 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3133  assert(!StackBase && !StackCleanup.isValid());
3134 
3135  // Save the stack.
3136  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3137  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3138 }
3139 
3140 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3141  if (StackBase) {
3142  // Restore the stack after the call.
3143  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3144  CGF.Builder.CreateCall(F, StackBase);
3145  }
3146 }
3147 
3148 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3149  SourceLocation ArgLoc,
3150  const FunctionDecl *FD,
3151  unsigned ParmNum) {
3152  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
3153  return;
3154  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
3155  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3156  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
3157  if (!NNAttr)
3158  return;
3159  SanitizerScope SanScope(this);
3160  assert(RV.isScalar());
3161  llvm::Value *V = RV.getScalarVal();
3162  llvm::Value *Cond =
3163  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3164  llvm::Constant *StaticData[] = {
3165  EmitCheckSourceLocation(ArgLoc),
3166  EmitCheckSourceLocation(NNAttr->getLocation()),
3167  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3168  };
3169  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
3170  "nonnull_arg", StaticData, None);
3171 }
3172 
3173 void CodeGenFunction::EmitCallArgs(
3174  CallArgList &Args, ArrayRef<QualType> ArgTypes,
3175  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3176  const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
3177  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3178 
3179  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
3180  if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
3181  return;
3182  auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3183  if (PS == nullptr)
3184  return;
3185 
3186  const auto &Context = getContext();
3187  auto SizeTy = Context.getSizeType();
3188  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3189  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
3190  Args.add(RValue::get(V), SizeTy);
3191  };
3192 
3193  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3194  // because arguments are destroyed left to right in the callee.
3195  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3196  // Insert a stack save if we're going to need any inalloca args.
3197  bool HasInAllocaArgs = false;
3198  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3199  I != E && !HasInAllocaArgs; ++I)
3200  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3201  if (HasInAllocaArgs) {
3202  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3203  Args.allocateArgumentMemory(*this);
3204  }
3205 
3206  // Evaluate each argument.
3207  size_t CallArgsStart = Args.size();
3208  for (int I = ArgTypes.size() - 1; I >= 0; --I) {
3209  CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
3210  MaybeEmitImplicitObjectSize(I, *Arg);
3211  EmitCallArg(Args, *Arg, ArgTypes[I]);
3212  EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
3213  CalleeDecl, ParamsToSkip + I);
3214  }
3215 
3216  // Un-reverse the arguments we just evaluated so they match up with the LLVM
3217  // IR function.
3218  std::reverse(Args.begin() + CallArgsStart, Args.end());
3219  return;
3220  }
3221 
3222  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3223  CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
3224  assert(Arg != ArgRange.end());
3225  EmitCallArg(Args, *Arg, ArgTypes[I]);
3226  EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
3227  CalleeDecl, ParamsToSkip + I);
3228  MaybeEmitImplicitObjectSize(I, *Arg);
3229  }
3230 }
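// Evaluation-order sketch: for 'f(makeA(), makeB())' under the MS C++ ABI,
// the reversed loop above emits makeB() before makeA(), then std::reverse
// restores declaration order so the CallArgList matches the IR signature.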
3231 
3232 namespace {
3233 
3234 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3235  DestroyUnpassedArg(Address Addr, QualType Ty)
3236  : Addr(Addr), Ty(Ty) {}
3237 
3238  Address Addr;
3239  QualType Ty;
3240 
3241  void Emit(CodeGenFunction &CGF, Flags flags) override {
3242  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3243  assert(!Dtor->isTrivial());
3244  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3245  /*Delegating=*/false, Addr);
3246  }
3247 };
3248 
3249 struct DisableDebugLocationUpdates {
3250  CodeGenFunction &CGF;
3251  bool disabledDebugInfo;
3252  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3253  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3254  CGF.disableDebugInfo();
3255  }
3256  ~DisableDebugLocationUpdates() {
3257  if (disabledDebugInfo)
3258  CGF.enableDebugInfo();
3259  }
3260 };
3261 
3262 } // end anonymous namespace
3263 
3264 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3265  QualType type) {
3266  DisableDebugLocationUpdates Dis(*this, E);
3267  if (const ObjCIndirectCopyRestoreExpr *CRE
3268  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3269  assert(getLangOpts().ObjCAutoRefCount);
3270  assert(getContext().hasSameType(E->getType(), type));
3271  return emitWritebackArg(*this, args, CRE);
3272  }
3273 
3274  assert(type->isReferenceType() == E->isGLValue() &&
3275  "reference binding to unmaterialized r-value!");
3276 
3277  if (E->isGLValue()) {
3278  assert(E->getObjectKind() == OK_Ordinary);
3279  return args.add(EmitReferenceBindingToExpr(E), type);
3280  }
3281 
3282  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3283 
3284  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3285  // However, we still have to push an EH-only cleanup in case we unwind before
3286  // we make it to the call.
3287  if (HasAggregateEvalKind &&
3288  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3289  // If we're using inalloca, use the argument memory. Otherwise, use a
3290  // temporary.
3291  AggValueSlot Slot;
3292  if (args.isUsingInAlloca())
3293  Slot = createPlaceholderSlot(*this, type);
3294  else
3295  Slot = CreateAggTemp(type, "agg.tmp");
3296 
3297  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3298  bool DestroyedInCallee =
3299  RD && RD->hasNonTrivialDestructor() &&
3300  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee();
3301  if (DestroyedInCallee)
3302  Slot.setExternallyDestructed();
3303 
3304  EmitAggExpr(E, Slot);
3305  RValue RV = Slot.asRValue();
3306  args.add(RV, type);
3307 
3308  if (DestroyedInCallee) {
3309  // Create a no-op GEP between the placeholder and the cleanup so we can
3310  // RAUW it successfully. It also serves as a marker of the first
3311  // instruction where the cleanup is active.
3312  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3313  type);
3314  // This unreachable is a temporary marker which will be removed later.
3315  llvm::Instruction *IsActive = Builder.CreateUnreachable();
3316  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3317  }
3318  return;
3319  }
3320 
3321  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3322  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3323  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3324  assert(L.isSimple());
3325  if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3326  args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3327  } else {
3328  // We can't represent a misaligned lvalue in the CallArgList, so copy
3329  // to an aligned temporary now.
3330  Address tmp = CreateMemTemp(type);
3331  EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3332  args.add(RValue::getAggregate(tmp), type);
3333  }
3334  return;
3335  }
3336 
3337  args.add(EmitAnyExprToTemp(E), type);
3338 }
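// Callee-cleanup sketch (MSVC C++ ABI): given
//
//     struct S { ~S(); };
//     void f(S);
//     f(S());
//
// the callee destroys the argument, so the caller pushes only the EH-only
// DestroyUnpassedArg cleanup above, active from the marker instruction until
// the call is actually emitted.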
3339 
3340 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3341  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3342  // implicitly widens null pointer constants that are arguments to varargs
3343  // functions to pointer-sized ints.
3344  if (!getTarget().getTriple().isOSWindows())
3345  return Arg->getType();
3346 
3347  if (Arg->getType()->isIntegerType() &&
3348  getContext().getTypeSize(Arg->getType()) <
3349  getContext().getTargetInfo().getPointerWidth(0) &&
3350  Arg->isNullPointerConstant(getContext(),
3351  Expr::NPC_ValueDependentIsNotNull)) {
3352  return getContext().getIntPtrType();
3353  }
3354 
3355  return Arg->getType();
3356 }
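// Example: on Win64, 'printf("%p", NULL)' passes the int literal 0 to a
// varargs function; returning intptr_t here mimics MSVC, which widens null
// pointer constants to pointer width in varargs calls.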
3357 
3358 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3359 // optimizer it can aggressively ignore unwind edges.
3360 void
3361 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3362  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3363  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3364  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3365  llvm::MDNode::get(getLLVMContext(), None));
3366 }
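// Effect (sketch): eligible calls are tagged roughly as
//     call void @f(), !clang.arc.no_objc_arc_exceptions !{}
// letting the ARC optimizer ignore unwind edges when pairing retains and
// releases.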
3367 
3368 /// Emits a call to the given no-arguments nounwind runtime function.
3369 llvm::CallInst *
3370 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3371  const llvm::Twine &name) {
3372  return EmitNounwindRuntimeCall(callee, None, name);
3373 }
3374 
3375 /// Emits a call to the given nounwind runtime function.
3376 llvm::CallInst *
3377 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3378  ArrayRef<llvm::Value*> args,
3379  const llvm::Twine &name) {
3380  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3381  call->setDoesNotThrow();
3382  return call;
3383 }
3384 
3385 /// Emits a simple call (never an invoke) to the given no-arguments
3386 /// runtime function.
3387 llvm::CallInst *
3388 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3389  const llvm::Twine &name) {
3390  return EmitRuntimeCall(callee, None, name);
3391 }
3392 
3393 // Calls which may throw must have operand bundles indicating which funclet
3394 // they are nested within.
3395 static void
3396 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3397  SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3398  // There is no need for a funclet operand bundle if we aren't inside a
3399  // funclet.
3400  if (!CurrentFuncletPad)
3401  return;
3402 
3403  // Skip intrinsics which cannot throw.
3404  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3405  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3406  return;
3407 
3408  BundleList.emplace_back("funclet", CurrentFuncletPad);
3409 }
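// Resulting IR shape (sketch): inside a cleanup funclet, calls carry
//
//     %pad = cleanuppad within none []
//     call void @g() [ "funclet"(token %pad) ]
//
// so the unwinder can associate the call with its enclosing funclet.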
3410 
3411 /// Emits a simple call (never an invoke) to the given runtime function.
3412 llvm::CallInst *
3413 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3414  ArrayRef<llvm::Value *> args,
3415  const llvm::Twine &name) {
3416  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3417  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3418 
3419  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
3420  call->setCallingConv(getRuntimeCC());
3421  return call;
3422 }
3423 
3424 /// Emits a call or invoke to the given noreturn runtime function.
3425 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3426  ArrayRef<llvm::Value*> args) {
3427  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3428  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3429 
3430  if (getInvokeDest()) {
3431  llvm::InvokeInst *invoke =
3432  Builder.CreateInvoke(callee,
3433  getUnreachableBlock(),
3434  getInvokeDest(),
3435  args,
3436  BundleList);
3437  invoke->setDoesNotReturn();
3438  invoke->setCallingConv(getRuntimeCC());
3439  } else {
3440  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3441  call->setDoesNotReturn();
3442  call->setCallingConv(getRuntimeCC());
3443  Builder.CreateUnreachable();
3444  }
3445 }
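// Sketch: inside an EH scope this produces
//     invoke void @callee(...) to label %unreachable unwind label %lpad
// marked noreturn with the runtime calling convention; otherwise it is a
// plain call followed directly by 'unreachable'.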
3446 
3447 /// Emits a call or invoke instruction to the given nullary runtime function.
3448 llvm::CallSite
3449 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3450  const Twine &name) {
3451  return EmitRuntimeCallOrInvoke(callee, None, name);
3452 }
3453 
3454 /// Emits a call or invoke instruction to the given runtime function.
3455 llvm::CallSite
3456 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3457  ArrayRef<llvm::Value*> args,
3458  const Twine &name) {
3459  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3460  callSite.setCallingConv(getRuntimeCC());
3461  return callSite;
3462 }
3463 
3464 /// Emits a call or invoke instruction to the given function, depending
3465 /// on the current state of the EH stack.
3466 llvm::CallSite
3469  const Twine &Name) {
3470  llvm::BasicBlock *InvokeDest = getInvokeDest();
3471  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3472  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3473 
3474  llvm::Instruction *Inst;
3475  if (!InvokeDest)
3476  Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3477  else {
3478  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3479  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3480  Name);
3481  EmitBlock(ContBB);
3482  }
3483 
3484  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3485  // optimizer it can aggressively ignore unwind edges.
3486  if (CGM.getLangOpts().ObjCAutoRefCount)
3487  AddObjCARCExceptionMetadata(Inst);
3488 
3489  return llvm::CallSite(Inst);
3490 }
3491 
3492 /// \brief Store a non-aggregate value to an address to initialize it. For
3493 /// initialization, a non-atomic store will be used.
3494 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3495  LValue Dst) {
3496  if (Src.isScalar())
3497  CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3498  else
3499  CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3500 }
3501 
3502 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3503  llvm::Value *New) {
3504  DeferredReplacements.push_back(std::make_pair(Old, New));
3505 }
3506 
3507 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3508  llvm::Value *Callee,
3509  ReturnValueSlot ReturnValue,
3510  const CallArgList &CallArgs,
3511  CGCalleeInfo CalleeInfo,
3512  llvm::Instruction **callOrInvoke) {
3513  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3514 
3515  // Handle struct-return functions by passing a pointer to the
3516  // location that we would like to return into.
3517  QualType RetTy = CallInfo.getReturnType();
3518  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3519 
3520  llvm::FunctionType *IRFuncTy =
3521  cast<llvm::FunctionType>(
3522  cast<llvm::PointerType>(Callee->getType())->getElementType());
3523 
3524  // If we're using inalloca, insert the allocation after the stack save.
3525  // FIXME: Do this earlier rather than hacking it in here!
3526  Address ArgMemory = Address::invalid();
3527  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3528  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3529  ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
3530  llvm::Instruction *IP = CallArgs.getStackBase();
3531  llvm::AllocaInst *AI;
3532  if (IP) {
3533  IP = IP->getNextNode();
3534  AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3535  } else {
3536  AI = CreateTempAlloca(ArgStruct, "argmem");
3537  }
3538  auto Align = CallInfo.getArgStructAlignment();
3539  AI->setAlignment(Align.getQuantity());
3540  AI->setUsedWithInAlloca(true);
3541  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3542  ArgMemory = Address(AI, Align);
3543  }
3544 
3545  // Helper function to drill into the inalloca allocation.
3546  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3547  auto FieldOffset =
3548  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3549  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3550  };
3551 
3552  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3553  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3554 
3555  // If the call returns a temporary with struct return, create a temporary
3556  // alloca to hold the result, unless one is given to us.
3557  Address SRetPtr = Address::invalid();
3558  size_t UnusedReturnSize = 0;
3559  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3560  if (!ReturnValue.isNull()) {
3561  SRetPtr = ReturnValue.getValue();
3562  } else {
3563  SRetPtr = CreateMemTemp(RetTy);
3564  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3565  uint64_t size =
3566  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3567  if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3568  UnusedReturnSize = size;
3569  }
3570  }
3571  if (IRFunctionArgs.hasSRetArg()) {
3572  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3573  } else if (RetAI.isInAlloca()) {
3574  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3575  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3576  }
3577  }
3578 
3579  Address swiftErrorTemp = Address::invalid();
3580  Address swiftErrorArg = Address::invalid();
3581 
3582  assert(CallInfo.arg_size() == CallArgs.size() &&
3583  "Mismatch between function signature & arguments.");
3584  unsigned ArgNo = 0;
3585  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3586  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3587  I != E; ++I, ++info_it, ++ArgNo) {
3588  const ABIArgInfo &ArgInfo = info_it->info;
3589  RValue RV = I->RV;
3590 
3591  // Insert a padding argument to ensure proper alignment.
3592  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3593  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3594  llvm::UndefValue::get(ArgInfo.getPaddingType());
3595 
3596  unsigned FirstIRArg, NumIRArgs;
3597  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3598 
3599  switch (ArgInfo.getKind()) {
3600  case ABIArgInfo::InAlloca: {
3601  assert(NumIRArgs == 0);
3602  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3603  if (RV.isAggregate()) {
3604  // Replace the placeholder with the appropriate argument slot GEP.
3605  llvm::Instruction *Placeholder =
3606  cast<llvm::Instruction>(RV.getAggregatePointer());
3607  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3608  Builder.SetInsertPoint(Placeholder);
3609  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3610  Builder.restoreIP(IP);
3611  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3612  } else {
3613  // Store the RValue into the argument struct.
3614  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3615  unsigned AS = Addr.getType()->getPointerAddressSpace();
3616  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3617  // There are some cases where a trivial bitcast is not avoidable. The
3618  // definition of a type later in a translation unit may change its type
3619  // from {}* to (%struct.foo*)*.
3620  if (Addr.getType() != MemType)
3621  Addr = Builder.CreateBitCast(Addr, MemType);
3622  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3623  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3624  }
3625  break;
3626  }
3627 
3628  case ABIArgInfo::Indirect: {
3629  assert(NumIRArgs == 1);
3630  if (RV.isScalar() || RV.isComplex()) {
3631  // Make a temporary alloca to pass the argument.
3632  Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3633  IRCallArgs[FirstIRArg] = Addr.getPointer();
3634 
3635  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3636  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3637  } else {
3638  // We want to avoid creating an unnecessary temporary+copy here;
3639  // however, we need one in three cases:
3640  // 1. If the argument is not byval, and we are required to copy the
3641  // source. (This case doesn't occur on any common architecture.)
3642  // 2. If the argument is byval, RV is not sufficiently aligned, and
3643  // we cannot force it to be sufficiently aligned.
3644  // 3. If the argument is byval, but RV is located in an address space
3645  // different than that of the argument (0).
3646  Address Addr = RV.getAggregateAddress();
3647  CharUnits Align = ArgInfo.getIndirectAlign();
3648  const llvm::DataLayout *TD = &CGM.getDataLayout();
3649  const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3650  const unsigned ArgAddrSpace =
3651  (FirstIRArg < IRFuncTy->getNumParams()
3652  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3653  : 0);
3654  if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3655  (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3656  llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3657  Align.getQuantity(), *TD)
3658  < Align.getQuantity()) ||
3659  (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3660  // Create an aligned temporary, and copy to it.
3661  Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3662  IRCallArgs[FirstIRArg] = AI.getPointer();
3663  EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3664  } else {
3665  // Skip the extra memcpy call.
3666  IRCallArgs[FirstIRArg] = Addr.getPointer();
3667  }
3668  }
3669  break;
3670  }
3671 
3672  case ABIArgInfo::Ignore:
3673  assert(NumIRArgs == 0);
3674  break;
3675 
3676  case ABIArgInfo::Extend:
3677  case ABIArgInfo::Direct: {
3678  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3679  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3680  ArgInfo.getDirectOffset() == 0) {
3681  assert(NumIRArgs == 1);
3682  llvm::Value *V;
3683  if (RV.isScalar())
3684  V = RV.getScalarVal();
3685  else
3686  V = Builder.CreateLoad(RV.getAggregateAddress());
3687 
3688  // Implement swifterror by copying into a new swifterror argument.
3689  // We'll write back in the normal path out of the call.
3690  if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3691  == ParameterABI::SwiftErrorResult) {
3692  assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3693 
3694  QualType pointeeTy = I->Ty->getPointeeType();
3695  swiftErrorArg =
3696  Address(V, getContext().getTypeAlignInChars(pointeeTy));
3697 
3698  swiftErrorTemp =
3699  CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3700  V = swiftErrorTemp.getPointer();
3701  cast<llvm::AllocaInst>(V)->setSwiftError(true);
3702 
3703  llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3704  Builder.CreateStore(errorValue, swiftErrorTemp);
3705  }
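          // Swifterror sketch: for a function whose error out-parameter uses
          // the swifterror ABI, the block above allocates a fresh swifterror
          // alloca, copies the caller's current error value into it, and
          // passes the alloca instead; the matching copy-out happens after
          // the call (see the writeback near the end of EmitCall).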
3706 
3707  // We might have to widen integers, but we should never truncate.
3708  if (ArgInfo.getCoerceToType() != V->getType() &&
3709  V->getType()->isIntegerTy())
3710  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3711 
3712  // If the argument doesn't match, perform a bitcast to coerce it. This
3713  // can happen due to trivial type mismatches.
3714  if (FirstIRArg < IRFuncTy->getNumParams() &&
3715  V->getType() != IRFuncTy->getParamType(FirstIRArg))
3716  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3717 
3718  IRCallArgs[FirstIRArg] = V;
3719  break;
3720  }
3721 
3722  // FIXME: Avoid the conversion through memory if possible.
3723  Address Src = Address::invalid();
3724  if (RV.isScalar() || RV.isComplex()) {
3725  Src = CreateMemTemp(I->Ty, "coerce");
3726  LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3727  EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3728  } else {
3729  Src = RV.getAggregateAddress();
3730  }
3731 
3732  // If the value is offset in memory, apply the offset now.
3733  Src = emitAddressAtOffset(*this, Src, ArgInfo);
3734 
3735  // Fast-isel and the optimizer generally like scalar values better than
3736  // FCAs, so we flatten them if this is safe to do for this argument.
3737  llvm::StructType *STy =
3738  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3739  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3740  llvm::Type *SrcTy = Src.getType()->getElementType();
3741  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3742  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3743 
3744  // If the source type is smaller than the destination type of the
3745  // coerce-to logic, copy the source value into a temp alloca the size
3746  // of the destination type to allow loading all of it. The bits past
3747  // the source value are left undef.
3748  if (SrcSize < DstSize) {
3749  Address TempAlloca
3750  = CreateTempAlloca(STy, Src.getAlignment(),
3751  Src.getName() + ".coerce");
3752  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3753  Src = TempAlloca;
3754  } else {
3755  Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
3756  }
3757 
3758  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3759  assert(NumIRArgs == STy->getNumElements());
3760  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3761  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3762  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3763  llvm::Value *LI = Builder.CreateLoad(EltPtr);
3764  IRCallArgs[FirstIRArg + i] = LI;
3765  }
3766  } else {
3767  // In the simple case, just pass the coerced loaded value.
3768  assert(NumIRArgs == 1);
3769  IRCallArgs[FirstIRArg] =
3770  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3771  }
3772 
3773  break;
3774  }
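// Flattening sketch: an argument coerced to { i64, i64 } is passed as two
// scalar IR arguments, one load per struct element, instead of a single
// first-class aggregate, which fast-isel and the optimizers handle better.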
3775 
3776  case ABIArgInfo::CoerceAndExpand: {
3777  auto coercionType = ArgInfo.getCoerceAndExpandType();
3778  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
3779 
3780  llvm::Value *tempSize = nullptr;
3781  Address addr = Address::invalid();
3782  if (RV.isAggregate()) {
3783  addr = RV.getAggregateAddress();
3784  } else {
3785  assert(RV.isScalar()); // complex should always just be direct
3786 
3787  llvm::Type *scalarType = RV.getScalarVal()->getType();
3788  auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
3789  auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
3790 
3791  tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
3792 
3793  // Materialize to a temporary.
3794  addr = CreateTempAlloca(RV.getScalarVal()->getType(),
3795  CharUnits::fromQuantity(std::max(layout->getAlignment(),
3796  scalarAlign)));
3797  EmitLifetimeStart(scalarSize, addr.getPointer());
3798 
3799  Builder.CreateStore(RV.getScalarVal(), addr);
3800  }
3801 
3802  addr = Builder.CreateElementBitCast(addr, coercionType);
3803 
3804  unsigned IRArgPos = FirstIRArg;
3805  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3806  llvm::Type *eltType = coercionType->getElementType(i);
3807  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
3808  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
3809  llvm::Value *elt = Builder.CreateLoad(eltAddr);
3810  IRCallArgs[IRArgPos++] = elt;
3811  }
3812  assert(IRArgPos == FirstIRArg + NumIRArgs);
3813 
3814  if (tempSize) {
3815  EmitLifetimeEnd(tempSize, addr.getPointer());
3816  }
3817 
3818  break;
3819  }
3820 
3821  case ABIArgInfo::Expand:
3822  unsigned IRArgPos = FirstIRArg;
3823  ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
3824  assert(IRArgPos == FirstIRArg + NumIRArgs);
3825  break;
3826  }
3827  }
3828 
3829  if (ArgMemory.isValid()) {
3830  llvm::Value *Arg = ArgMemory.getPointer();
3831  if (CallInfo.isVariadic()) {
3832  // When passing non-POD arguments by value to variadic functions, we will
3833  // end up with a variadic prototype and an inalloca call site. In such
3834  // cases, we can't do any parameter mismatch checks. Give up and bitcast
3835  // the callee.
3836  unsigned CalleeAS =
3837  cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3838  Callee = Builder.CreateBitCast(
3839  Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3840  } else {
3841  llvm::Type *LastParamTy =
3842  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3843  if (Arg->getType() != LastParamTy) {
3844 #ifndef NDEBUG
3845  // Assert that these structs have equivalent element types.
3846  llvm::StructType *FullTy = CallInfo.getArgStruct();
3847  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3848  cast<llvm::PointerType>(LastParamTy)->getElementType());
3849  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3850  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3851  DE = DeclaredTy->element_end(),
3852  FI = FullTy->element_begin();
3853  DI != DE; ++DI, ++FI)
3854  assert(*DI == *FI);
3855 #endif
3856  Arg = Builder.CreateBitCast(Arg, LastParamTy);
3857  }
3858  }
3859  assert(IRFunctionArgs.hasInallocaArg());
3860  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3861  }
3862 
3863  if (!CallArgs.getCleanupsToDeactivate().empty())
3864  deactivateArgCleanupsBeforeCall(*this, CallArgs);
3865 
3866  // If the callee is a bitcast of a function to a varargs pointer to function
3867  // type, check to see if we can remove the bitcast. This handles some cases
3868  // with unprototyped functions.
3869  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3870  if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3871  llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
3872  llvm::FunctionType *CurFT =
3873  cast<llvm::FunctionType>(CurPT->getElementType());
3874  llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3875 
3876  if (CE->getOpcode() == llvm::Instruction::BitCast &&
3877  ActualFT->getReturnType() == CurFT->getReturnType() &&
3878  ActualFT->getNumParams() == CurFT->getNumParams() &&
3879  ActualFT->getNumParams() == IRCallArgs.size() &&
3880  (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3881  bool ArgsMatch = true;
3882  for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3883  if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3884  ArgsMatch = false;
3885  break;
3886  }
3887 
3888  // Strip the cast if we can get away with it. This is a nice cleanup,
3889  // but also allows us to inline the function at -O0 if it is marked
3890  // always_inline.
3891  if (ArgsMatch)
3892  Callee = CalleeF;
3893  }
3894  }
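// Sketch: with a K&R declaration 'void f();' and a definition 'void f(int)',
// the call 'f(42)' roughly sees @f bitcast to a varargs function pointer;
// when the parameter and return types line up as checked above, the cast is
// stripped and @f is called directly, which also lets always_inline fire
// at -O0.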
3895 
3896  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3897  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
3898  // An inalloca argument can have a different type.
3899  if (IRFunctionArgs.hasInallocaArg() &&
3900  i == IRFunctionArgs.getInallocaArgNo())
3901  continue;
3902  if (i < IRFuncTy->getNumParams())
3903  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3904  }
3905 
3906  unsigned CallingConv;
3907  CodeGen::AttributeListType AttributeList;
3908  CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
3909  AttributeList, CallingConv,
3910  /*AttrOnCallSite=*/true);
3911  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3912  AttributeList);
3913 
3914  bool CannotThrow;
3915  if (currentFunctionUsesSEHTry()) {
3916  // SEH cares about asynchronous exceptions, everything can "throw."
3917  CannotThrow = false;
3918  } else if (isCleanupPadScope() &&
3919  EHPersonality::get(*this).isMSVCXXPersonality()) {
3920  // The MSVC++ personality will implicitly terminate the program if an
3921  // exception is thrown. An unwind edge cannot be reached.
3922  CannotThrow = true;
3923  } else {
3924  // Otherwise, nounwind call sites will never throw.
3925  CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3926  llvm::Attribute::NoUnwind);
3927  }
3928  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
3929 
3930  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3931  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3932 
3933  llvm::CallSite CS;
3934  if (!InvokeDest) {
3935  CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
3936  } else {
3937  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3938  CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
3939  BundleList);
3940  EmitBlock(Cont);
3941  }
3942  if (callOrInvoke)
3943  *callOrInvoke = CS.getInstruction();
3944 
3945  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3946  !CS.hasFnAttr(llvm::Attribute::NoInline))
3947  Attrs =
3948  Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3949  llvm::Attribute::AlwaysInline);
3950 
3951  // Disable inlining inside SEH __try blocks.
3952  if (isSEHTryScope())
3953  Attrs =
3954  Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3955  llvm::Attribute::NoInline);
3956 
3957  CS.setAttributes(Attrs);
3958  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3959 
3960  // Insert instrumentation or attach profile metadata at indirect call sites.
3961  // For more details, see the comment before the definition of
3962  // IPVK_IndirectCallTarget in InstrProfData.inc.
3963  if (!CS.getCalledFunction())
3964  PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
3965  CS.getInstruction(), Callee);
3966 
3967  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3968  // optimizer it can aggressively ignore unwind edges.
3969  if (CGM.getLangOpts().ObjCAutoRefCount)
3970  AddObjCARCExceptionMetadata(CS.getInstruction());
3971 
3972  // If the call doesn't return, finish the basic block and clear the
3973  // insertion point; this allows the rest of IRgen to discard
3974  // unreachable code.
3975  if (CS.doesNotReturn()) {
3976  if (UnusedReturnSize)
3977  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3978  SRetPtr.getPointer());
3979 
3980  Builder.CreateUnreachable();
3981  Builder.ClearInsertionPoint();
3982 
3983  // FIXME: For now, emit a dummy basic block because expr emitters in
3984  // general are not ready to handle emitting expressions at unreachable
3985  // points.
3986  EnsureInsertPoint();
3987 
3988  // Return a reasonable RValue.
3989  return GetUndefRValue(RetTy);
3990  }
3991 
3992  llvm::Instruction *CI = CS.getInstruction();
3993  if (!CI->getType()->isVoidTy())
3994  CI->setName("call");
3995 
3996  // Perform the swifterror writeback.
3997  if (swiftErrorTemp.isValid()) {
3998  llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
3999  Builder.CreateStore(errorResult, swiftErrorArg);
4000  }
4001 
4002  // Emit any writebacks immediately. Arguably this should happen
4003  // after any return-value munging.
4004  if (CallArgs.hasWritebacks())
4005  emitWritebacks(*this, CallArgs);
4006 
4007  // The stack cleanup for inalloca arguments has to run out of the normal
4008  // lexical order, so deactivate it and run it manually here.
4009  CallArgs.freeArgumentMemory(*this);
4010 
4011  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4012  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
4013  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4014  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4015  }
4016 
4017  RValue Ret = [&] {
4018  switch (RetAI.getKind()) {
4019  case ABIArgInfo::CoerceAndExpand: {
4020  auto coercionType = RetAI.getCoerceAndExpandType();
4021  auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4022 
4023  Address addr = SRetPtr;
4024  addr = Builder.CreateElementBitCast(addr, coercionType);
4025 
4026  assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4027  bool requiresExtract = isa<llvm::StructType>(CI->getType());
4028 
4029  unsigned unpaddedIndex = 0;
4030  for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4031  llvm::Type *eltType = coercionType->getElementType(i);
4032  if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4033  Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4034  llvm::Value *elt = CI;
4035  if (requiresExtract)
4036  elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4037  else
4038  assert(unpaddedIndex == 0);
4039  Builder.CreateStore(elt, eltAddr);
4040  }
4041  // FALLTHROUGH
4042  }
4043 
4044  case ABIArgInfo::InAlloca:
4045  case ABIArgInfo::Indirect: {
4046  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4047  if (UnusedReturnSize)
4048  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4049  SRetPtr.getPointer());
4050  return ret;
4051  }
4052 
4053  case ABIArgInfo::Ignore:
4054  // If we are ignoring an argument that had a result, make sure to
4055  // construct the appropriate return value for our caller.
4056  return GetUndefRValue(RetTy);
4057 
4058  case ABIArgInfo::Extend:
4059  case ABIArgInfo::Direct: {
4060  llvm::Type *RetIRTy = ConvertType(RetTy);
4061  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4062  switch (getEvaluationKind(RetTy)) {
4063  case TEK_Complex: {
4064  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4065  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4066  return RValue::getComplex(std::make_pair(Real, Imag));
4067  }
4068  case TEK_Aggregate: {
4069  Address DestPtr = ReturnValue.getValue();
4070  bool DestIsVolatile = ReturnValue.isVolatile();
4071 
4072  if (!DestPtr.isValid()) {
4073  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4074  DestIsVolatile = false;
4075  }
4076  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4077  return RValue::getAggregate(DestPtr);
4078  }
4079  case TEK_Scalar: {
4080  // If the argument doesn't match, perform a bitcast to coerce it. This
4081  // can happen due to trivial type mismatches.
4082  llvm::Value *V = CI;
4083  if (V->getType() != RetIRTy)
4084  V = Builder.CreateBitCast(V, RetIRTy);
4085  return RValue::get(V);
4086  }
4087  }
4088  llvm_unreachable("bad evaluation kind");
4089  }
4090 
4091  Address DestPtr = ReturnValue.getValue();
4092  bool DestIsVolatile = ReturnValue.isVolatile();
4093 
4094  if (!DestPtr.isValid()) {
4095  DestPtr = CreateMemTemp(RetTy, "coerce");
4096  DestIsVolatile = false;
4097  }
4098 
4099  // If the value is offset in memory, apply the offset now.
4100  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4101  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4102 
4103  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4104  }
4105 
4106  case ABIArgInfo::Expand:
4107  llvm_unreachable("Invalid ABI kind for return argument");
4108  }
4109 
4110  llvm_unreachable("Unhandled ABIArgInfo::Kind");
4111  } ();
4112 
4113  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
4114 
4115  if (Ret.isScalar() && TargetDecl) {
4116  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4117  llvm::Value *OffsetValue = nullptr;
4118  if (const auto *Offset = AA->getOffset())
4119  OffsetValue = EmitScalarExpr(Offset);
4120 
4121  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4122  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4123  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4124  OffsetValue);
4125  }
4126  }
4127 
4128  return Ret;
4129 }
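// End-to-end sketch for a trivial direct call 'int r = add(1, 2);':
// both arguments are ABIArgInfo::Direct, so IRCallArgs is {i32 1, i32 2};
// no sret or inalloca slot is created; the emitted instruction is
//     %call = call i32 @add(i32 1, i32 2)
// and the Direct return path wraps %call in RValue::get after any trivial
// bitcast.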
4130 
4131 /* VarArg handling */
4132 
4133 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4134  VAListAddr = VE->isMicrosoftABI()
4135  ? EmitMSVAListRef(VE->getSubExpr())
4136  : EmitVAListRef(VE->getSubExpr());
4137  QualType Ty = VE->getType();
4138  if (VE->isMicrosoftABI())
4139  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4140  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4141 }
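// Example: 'va_arg(ap, double)' lands here and is delegated to the target
// ABIInfo, which picks the lowering (e.g. register-save-area vs. overflow
// area on x86-64 SysV); on Windows the separate MS va_list path is used.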
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:599
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1464
Ignore - Ignore the argument (treat as void).
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:151
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:408
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
Definition: CGCall.cpp:909
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
Definition: Decl.h:1561
bool isVariadic() const
Definition: Type.h:3366
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
Definition: CGCall.cpp:573
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, const FunctionDecl *CalleeDecl=nullptr, unsigned ParamsToSkip=0)
EmitCallArgs - Emit call arguments for a function.
StringRef getName() const
getName - Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:237
ObjCEntrypoints & getObjCEntrypoints() const
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2179
Complete object ctor.
Definition: ABI.h:26
CanQualType VoidPtrTy
Definition: ASTContext.h:908
A (possibly-)qualified type.
Definition: Type.h:598
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
Definition: CGCall.cpp:1423
llvm::Type * ConvertTypeForMem(QualType T)
CanQualType getReturnType() const
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:2986
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:77
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:64
unsigned getInAllocaFieldIndex() const
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:2911
llvm::LLVMContext & getLLVMContext()
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:1036
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2584
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:508
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:2879
const TargetInfo & getTarget() const
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:160
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, const FunctionDecl *FD, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:3148
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateTempAlloca - This creates a alloca and inserts it into the entry block.
Definition: CGExpr.cpp:69
Extend - Valid only for integer argument types.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:4133
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:2902
llvm::LoadInst * CreateDefaultAlignedLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:127
Address getAddress() const
Definition: CGValue.h:331
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:212
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
bool hasNonTrivialDestructor() const
Determine whether this class has a non-trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1275
llvm::CallingConv::ID getRuntimeCC() const
bool hasFlexibleArrayMember() const
Definition: Decl.h:3305
ASTContext & getContext() const
Definition: CodeGenTypes.h:176
const llvm::DataLayout & getDataLayout() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type...
Definition: Type.h:3416
The base class of the type hierarchy.
Definition: Type.h:1281
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1593
RValue asAggregateRValue() const
Definition: CGValue.h:435
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:2034
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:889
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
const LangOptions & getLangOpts() const
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference...
Definition: CGExpr.cpp:3422
bool isBlockPointerType() const
Definition: Type.h:5488
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:1940
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:340
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2187
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e.g., it is an unsigned integer type or a vector.
Definition: Type.cpp:1776
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:2014
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:3140
Default closure variant of a ctor.
Definition: ABI.h:30
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
VarDecl - An instance of this class is created to represent a variable declaration or definition...
Definition: Decl.h:768
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
Definition: CGCall.cpp:319
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change...
Definition: TargetCXXABI.h:217
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:1140
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
Definition: Expr.cpp:1871
void setCoerceToType(llvm::Type *T)
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:1813
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:113
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, LValue Dst)
Store a non-aggregate value to an address to initialize it.
Definition: CGCall.cpp:3494
virtual bool shouldSignExtUnsignedType(QualType Ty) const
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:245
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:258
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
ParmVarDecl - Represents a parameter to a function.
Definition: Decl.h:1377
bool isObjCRetainableType() const
Definition: Type.cpp:3699
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3132
bool isVoidType() const
Definition: Type.h:5680
static bool isProvablyNonNull(llvm::Value *addr)
Definition: CGCall.cpp:2906
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:107
unsigned getNumParams() const
Definition: Type.h:3271
RecordDecl - Represents a struct/union/class.
Definition: Decl.h:3253
const_arg_iterator arg_end() const
An object to manage conditionally-evaluated expressions.
Description of a constructor that was inherited from a base class.
Definition: DeclCXX.h:2161
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:2973
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:2976
ParameterABI getABI() const
Return the ABI treatment of this parameter.
Definition: Type.h:3104
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2664
class LLVM_ALIGNAS(8) DependentTemplateSpecializationType const IdentifierInfo * Name
Represents a template specialization type whose template cannot be resolved, e.g. ...
Definition: Type.h:4549
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:92
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition: CGExpr.cpp:485
bool isReferenceType() const
Definition: Type.h:5491
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2173
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
Definition: Decl.h:2293
bool isAnyPointerType() const
Definition: Type.h:5485
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
Definition: Type.h:3417
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2163
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:3005
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3467
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
llvm::Type * getCoerceToType() const
RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, CGCalleeInfo CalleeInfo=CGCalleeInfo(), llvm::Instruction **callOrInvoke=nullptr)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3507
unsigned getRegParm() const
Definition: Type.h:2948
const Decl * getDecl() const
Definition: GlobalDecl.h:62
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will always be accessible even if ...
Definition: CGExpr.cpp:159
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1494
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:106
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3425
CharUnits getArgStructAlignment() const
const FunctionProtoType * getCalleeFunctionProtoType()
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment...
static bool hasScalarEvaluationKind(QualType T)
CharUnits getAlignment() const
Definition: CGValue.h:316
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2456
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:588
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:168
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:44
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:260
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
QualType getReturnType() const
Definition: Type.h:3009
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:1838
field_range fields() const
Definition: Decl.h:3382
const Expr * getSubExpr() const
Definition: Expr.h:3673
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:270
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:418
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
Definition: CGCall.cpp:643
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2448
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
Definition: EHScopeStack.h:85
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:1892
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
Definition: CGCall.cpp:233
GlobalDecl CurGD
CurGD - The GlobalDecl for the current function being compiled.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
An ordinary object is located at an address in memory.
Definition: Specifiers.h:121
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition: CGExpr.cpp:4156
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:672
bool isValid() const
Definition: Address.h:36
detail::InMemoryDirectory::const_iterator I
llvm::StructType * getCoerceAndExpandType() const
QualType getCanonicalTypeInternal() const
Definition: Type.h:2001
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
QualType getType() const
Definition: Decl.h:599
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:3264
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:456
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:71
const BlockExpr * BlockExpression
Definition: CGBlocks.h:231
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type...
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
Definition: CGCall.cpp:445
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3039
unsigned getNumRequiredArgs() const
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3456
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource AlignSource=AlignmentSource::Type)
bool isUnion() const
Definition: Decl.h:2939
llvm::Type * getUnpaddedCoerceAndExpandType() const
ExtInfo getExtInfo() const
Definition: Type.h:3018
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask >> Checked, StringRef CheckName, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will call a handler function in a sanitizer runtime with the provided argum...
Definition: CGExpr.cpp:2497
CanQualType getCanonicalTypeUnqualified() const
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition: Type.h:3073
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3304
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
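For instance, a runtime hook known not to unwind might be emitted like this (a sketch; "__example_hook" is a hypothetical function name, and CGM/CGF are the usual module and function state):
  static void emitExampleHook(CodeGenFunction &CGF, CodeGenModule &CGM) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGF.Builder.getVoidTy(), /*isVarArg=*/false);
    llvm::Constant *Hook = CGM.CreateRuntimeFunction(FTy, "__example_hook");
    // No invoke/landing pad needed: the callee will not unwind.
    CGF.EmitNounwindRuntimeCall(Hook);
  }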
const CodeGen::CGBlockInfo * BlockInfo
const TargetCodeGenInfo & getTargetCodeGenInfo()
writeback_const_range writebacks() const
Definition: CGCall.h:102
const TargetInfo & getTarget() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
Definition: CGCall.h:114
bool hasConstructorVariants() const
Does this ABI have different entrypoints for complete-object and base-subobject constructors?
Definition: TargetCXXABI.h:223
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
Definition: CodeGenPGO.cpp:753
ASTContext * Context
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:91
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:2881
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3655
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:415
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > &paramInfos, CanQual< FunctionProtoType > FPT, const FunctionDecl *FD)
Adds the formal parameters in FPT to the given prefix.
Definition: CGCall.cpp:104
Address Temporary
The temporary alloca.
Definition: CGCall.h:67
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:70
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *FD)
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition: Type.cpp:1722
static TypeEvaluationKind getEvaluationKind(QualType T)
hasAggregateLLVMType - Return true if the specified AST type will map into an aggregate LLVM type or ...
llvm::Value * getPointer() const
Definition: Address.h:38
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:2861
Expr - This represents one expression.
Definition: Expr.h:105
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:69
static Address invalid()
Definition: Address.h:35
bool isInstance() const
Definition: DeclCXX.h:1763
bool isAggregate() const
Definition: CGValue.h:53
CGCXXABI & getCXXABI() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:2856
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:86
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource AlignSource=AlignmentSource::Type, llvm::MDNode *TBAAInfo=nullptr, bool isInit=false, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
Definition: CGExpr.cpp:1374
static ParamValue forIndirect(Address addr)
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
Definition: CGCall.cpp:636
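This is the usual starting point for declaring void() runtime helpers; a sketch of pairing it with GetFunctionType (assuming a CodeGenModule &CGM):
  static llvm::FunctionType *getNullaryLLVMType(CodeGenModule &CGM) {
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    return CGM.getTypes().GetFunctionType(FI);  // lowers to 'void ()'
  }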
bool isVirtual() const
Definition: DeclCXX.h:1780
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2414
llvm::Constant * objc_retain
id objc_retain(id);
CharUnits getIndirectAlign() const
RValue asRValue() const
Definition: CGValue.h:578
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:639
const ParmVarDecl * getParamDecl(unsigned i) const
Definition: Decl.h:2011
bool getNoReturn() const
Definition: Type.h:2945
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp")
CreateAggTemp - Create a temporary memory object for the given aggregate type.
ASTContext & getContext() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
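A small worked example of constructing and combining CharUnits quantities (a sketch; the rounding is hand-rolled rather than relying on version-specific CharUnits helpers):
  static CharUnits roundUpTo(CharUnits Size, CharUnits Align) {
    int64_t A = Align.getQuantity();
    // e.g. Size = 12 bytes, Align = 8 bytes => 16 bytes.
    return CharUnits::fromQuantity((Size.getQuantity() + A - 1) / A * A);
  }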
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:406
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
void add(RValue rvalue, QualType type, bool needscopy=false)
Definition: CGCall.h:81
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
Definition: CGCall.cpp:304
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2444
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
llvm::LLVMContext & getLLVMContext()
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1427
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:586
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1269
Address EmitPointerWithAlignment(const Expr *Addr, AlignmentSource *Source=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:820
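A sketch combining this with EmitLoadOfScalar: compute the pointer value and best-known alignment for a pointer-typed expression, then load through it (CGF assumed; Loc is the use's source location):
  static llvm::Value *loadThroughPointer(CodeGenFunction &CGF,
                                         const Expr *PtrE,
                                         SourceLocation Loc) {
    Address Addr = CGF.EmitPointerWithAlignment(PtrE);
    QualType Pointee = PtrE->getType()->getPointeeType();
    return CGF.EmitLoadOfScalar(Addr, /*Volatile=*/false, Pointee, Loc);
  }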
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2602
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:105
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents unary expressions (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1668
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=None)
class LLVM_ALIGNAS(8) TemplateSpecializationType
Represents a type template specialization; the template must be a class template, a type alias templa...
Definition: Type.h:4154
bool isGLValue() const
Definition: Expr.h:250
llvm::Type * getPaddingType() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:279
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, IsZeroed_t isZeroed=IsNotZeroed)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:502
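A sketch of the common pairing: make a temporary, wrap it in a slot, and evaluate an aggregate expression into it (CGF assumed; this mirrors what CreateAggTemp does internally):
  static Address emitAggIntoTemp(CodeGenFunction &CGF, const Expr *E) {
    Address Tmp = CGF.CreateMemTemp(E->getType(), "agg.tmp");
    CGF.EmitAggExpr(E, AggValueSlot::forAddr(
                           Tmp, E->getType().getQualifiers(),
                           AggValueSlot::IsNotDestructed,
                           AggValueSlot::DoesNotNeedGCBarriers,
                           AggValueSlot::IsNotAliased));
    return Tmp;
  }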
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration type whose underlying ...
Definition: Type.cpp:1762
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:1086
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:231
bool hasWritebacks() const
Definition: CGCall.h:97
unsigned getNumRequiredArgs() const
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:29
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1191
bool isVolatile() const
Definition: CGValue.h:295
std::string CPU
If given, the name of the target CPU to generate code for.
Definition: TargetOptions.h:36
The l-value was considered opaque, so the alignment was determined from a type.
bool isNothrow(const ASTContext &Ctx, bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.cpp:2775
void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment, llvm::Value *OffsetValue=nullptr)
ArrayRef< ParmVarDecl * > parameters() const
Definition: DeclObjC.h:371
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to Dst from Src, where the source and destination may have different types.
Definition: CGCall.cpp:1216
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:160
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:145
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1432
Address getValue() const
Definition: CGCall.h:171
bool isSimple() const
Definition: CGValue.h:246
FunctionType::ExtInfo getExtInfo() const
ConstExprIterator const_arg_iterator
Definition: Expr.h:2238
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:2994
ASTContext & getContext() const
RequiredArgs getRequiredArgs() const
Encodes a location in the source.
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go...
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1783
unsigned getNumParams() const
getNumParams - Return the number of parameters this function must have based on its FunctionType...
Definition: Decl.cpp:2742
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2047
llvm::BasicBlock * getUnreachableBlock()
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource AlignSource=AlignmentSource::Type, llvm::MDNode *TBAAInfo=nullptr, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
Definition: CGExpr.cpp:1262
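Together with EmitStoreOfScalar above, this gives the canonical way to move one scalar of AST type Ty between addresses (a sketch, assuming both addresses are valid for Ty):
  static void copyScalar(CodeGenFunction &CGF, Address Src, Address Dst,
                         QualType Ty, SourceLocation Loc) {
    llvm::Value *V = CGF.EmitLoadOfScalar(Src, /*Volatile=*/false, Ty, Loc);
    CGF.EmitStoreOfScalar(V, Dst, /*Volatile=*/false, Ty);
  }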
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Definition: CGCall.cpp:248
const std::string ID
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:1882
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1193
An aggregate value slot.
Definition: CGValue.h:441
bool isVariadic() const
Definition: DeclObjC.h:416
llvm::Value * EmitLifetimeStart(uint64_t Size, llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition: CGDecl.cpp:930
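A sketch of the bracketing protocol: EmitLifetimeStart returns null when it decides not to emit a marker, in which case the matching end marker must be skipped as well:
  static void useTempWithLifetime(CodeGenFunction &CGF, Address Tmp,
                                  uint64_t SizeInBytes) {
    llvm::Value *SizeV = CGF.EmitLifetimeStart(SizeInBytes, Tmp.getPointer());
    // ... use the temporary ...
    if (SizeV)
      CGF.EmitLifetimeEnd(SizeV, Tmp.getPointer());
  }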
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
Definition: CGCall.cpp:405
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:1736
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
SanitizerSet SanOpts
Sanitizers enabled for this function.
CoerceAndExpand - Only valid for aggregate argument types.
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:3132
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2114
std::vector< std::string > Features
The list of target specific features to enable or disable -- this should be a list of strings starting...
Definition: TargetOptions.h:55
static bool classof(const EHScope *Scope)
Definition: CGCleanup.h:420
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:696
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:197
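A worked instance: for a 16-byte-aligned array of 4-byte elements, an arbitrary element can only be assumed 4-byte aligned, since element 1 lives at offset 4:
  static void arrayElementAlignExample() {
    CharUnits ArrayAlign = CharUnits::fromQuantity(16);
    CharUnits EltSize = CharUnits::fromQuantity(4);
    CharUnits EltAlign = ArrayAlign.alignmentOfArrayElement(EltSize);
    // EltAlign.getQuantity() == 4
    (void)EltAlign;
  }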
CanQualType VoidTy
Definition: ASTContext.h:893
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
const CodeGenOptions & getCodeGenOpts() const
An aligned address.
Definition: Address.h:25
const LangOptions & getLangOpts() const
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:180
QualType getReturnType() const
Definition: DeclObjC.h:330
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:5849
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
Definition: CGCall.cpp:312
Complete object dtor.
Definition: ABI.h:36
llvm::Instruction * CurrentFuncletPad
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
Definition: CGDecl.cpp:1747
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1449
static ParamValue forDirect(llvm::Value *value)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1609
ConstructorUsingShadowDecl * getShadowDecl() const
Definition: DeclCXX.h:2173
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression, because a __builtin_ms_va_list is a pointer to a char.
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:618
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
Definition: CGCall.cpp:566
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:536
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1623
Address EmitVAListRef(const Expr *E)
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:1050
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:146
QualType getType() const
Definition: Expr.h:126
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target-dependent code to load a value of the given type from the given va_list address.
CGFunctionInfo - Class to encapsulate the information about a function definition.
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
This class organizes the cross-function state that is used while generating LLVM code.
bool canHaveCoerceToType() const
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2391
llvm::Value * getAggregatePointer() const
Definition: CGValue.h:75
bool isScalar() const
Definition: CGValue.h:51
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:92
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
unsigned getDirectOffset() const
Address CreateMemTemp(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
Definition: CGExpr.cpp:98
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1375
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
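A sketch of defensively unwrapping an RValue by kind:
  static llvm::Value *scalarOrNull(RValue RV) {
    // Only scalar RValues carry a single Value*; aggregates carry an
    // address, and complex values carry a pair.
    return RV.isScalar() ? RV.getScalarVal() : nullptr;
  }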
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:183
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
LValue Source
The original argument.
Definition: CGCall.h:64
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type...
Definition: CGCall.cpp:380
bool getProducesResult() const
Definition: Type.h:2946
llvm::LoadInst * CreateAlignedLoad(llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:91
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:931
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:140
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CallingConv getCC() const
Definition: Type.h:2954
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:693
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:727
void EmitAggregateCopy(Address DestPtr, Address SrcPtr, QualType EltTy, bool isVolatile=false, bool isAssignment=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:1459
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:113
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
static void getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, SmallVectorImpl< llvm::OperandBundleDef > &BundleList)
Definition: CGCall.cpp:3396
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type. ...
Definition: CGExprAgg.cpp:1437
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2117
static bool hasAggregateEvaluationKind(QualType T)
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:3707
Complex values, per C99 6.2.5p11.
Definition: Type.h:2119
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:5818
TargetOptions & getTargetOpts() const
Retrieve the target options.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Definition: CGCXXABI.h:127
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2545
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition: CGExpr.cpp:903
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
Definition: CGExpr.cpp:3709
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:50
Expand - Only valid for aggregate argument types.
Address getAddress() const
Definition: CGValue.h:562
const CGFunctionInfo & arrangeMSMemberPointerThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
Definition: CGCall.cpp:475
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2319
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1528
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:1848
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:832
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:5323
bool isComplex() const
Definition: CGValue.h:52
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:397
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
Represents a base class of a C++ class.
Definition: DeclCXX.h:159
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:132
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1593
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
bool isNoBuiltinFunc(const char *Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
Represents a C++ struct/union/class.
Definition: DeclCXX.h:263
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:313
llvm::BasicBlock * getInvokeDest()
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:124
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:2072
CallingConv getDefaultCallingConvention(bool isVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:970
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
llvm::Instruction * getStackBase() const
Definition: CGCall.h:119
This class is used for builtin types like 'int'.
Definition: Type.h:2039
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:70
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:70
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1440
Copying closure variant of a ctor.
Definition: ABI.h:29
Defines the clang::TargetInfo interface.
const_arg_iterator arg_begin() const
stable_iterator getInnermostEHScope() const
Definition: EHScopeStack.h:361
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows)
Definition: CGCall.cpp:167
bool getHasRegParm() const
Definition: Type.h:2947
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
Definition: CGCall.cpp:485
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
CanQualType IntTy
Definition: ASTContext.h:901
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3678
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2200
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:2982
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:177
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
Definition: DeclCXX.h:3016
static RValue get(llvm::Value *V)
Definition: CGValue.h:85
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional, const FunctionDecl *FD)
Compute the arguments required by the given formal prototype, given that there may be some additional...
bool isVolatileQualified() const
Definition: CGValue.h:55
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
Definition: CGDecl.cpp:943
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments...
Definition: CGCall.cpp:556
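A sketch of arranging an indirect call through a plain C function pointer of formal type FnTy, with Args already evaluated (ChainCall is false for ordinary calls):
  static const CGFunctionInfo &arrangeIndirectCall(CodeGenModule &CGM,
                                                   const CallArgList &Args,
                                                   const FunctionType *FnTy) {
    return CGM.getTypes().arrangeFreeFunctionCall(Args, FnTy,
                                                  /*ChainCall=*/false);
  }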
CXXCtorType toCXXCtorType(StructorType T)
Definition: CodeGenTypes.h:65
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition: CGExpr.cpp:2388
bool getIndirectRealign() const
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:106
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
CodeGenTypes & getTypes() const
LValue - This represents an lvalue references.
Definition: CGValue.h:152
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:733
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:147
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g., it is a signed integer type or a vector.
Definition: Type.cpp:1736
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1620
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and return the address of the base class.
Definition: CGClass.cpp:265
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:56
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:2512
CGCalleeInfo - Class to encapsulate the information about a callee to be used during the generation o...
A class which abstracts out some details necessary for making a call.
Definition: Type.h:2904
This parameter (which must have pointer type) is a Swift indirect result parameter.
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:5702
Expr * IgnoreParens() LLVM_READONLY
IgnoreParens - Ignore parentheses.
Definition: Expr.cpp:2295
AttributeList - Represents a syntactic attribute.
Definition: AttributeList.h:94
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:5286
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraArgs)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:351
bool isPointerType() const
Definition: Type.h:5482
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1466
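Putting the arrangement entry points and this lowering step together, a sketch of going from a GlobalDecl to its LLVM function type (CGM assumed to be the active CodeGenModule):
  static llvm::FunctionType *lowerGlobalDecl(CodeGenModule &CGM,
                                             GlobalDecl GD) {
    const CGFunctionInfo &FI = CGM.getTypes().arrangeGlobalDeclaration(GD);
    return CGM.getTypes().GetFunctionType(FI);
  }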