24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/Support/raw_ostream.h"
31 using namespace clang;
32 using namespace CodeGen;
40 for (
unsigned I = FirstIndex;
I <= LastIndex; ++
I) {
42 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array,
I);
56 ByRef, Realign, Padding);
87 unsigned maxAllRegisters) {
88 unsigned intCount = 0, fpCount = 0;
90 if (
type->isPointerTy()) {
92 }
else if (
auto intTy = dyn_cast<llvm::IntegerType>(
type)) {
94 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
96 assert(
type->isVectorTy() ||
type->isFloatingPointTy());
101 return (intCount + fpCount > maxAllRegisters);
106 unsigned numElts)
const {
133 if (UD->hasAttr<TransparentUnionAttr>()) {
134 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
168 uint64_t Members)
const {
177 raw_ostream &OS = llvm::errs();
178 OS <<
"(ABIArgInfo Kind=";
181 OS <<
"Direct Type=";
205 OS <<
"CoerceAndExpand Type=";
219 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
221 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
223 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
225 Ptr->getName() +
".aligned");
249 bool AllowHigherAlign) {
259 if (AllowHigherAlign && DirectAlign > SlotSize) {
276 !DirectTy->isStructTy()) {
299 std::pair<CharUnits, CharUnits> ValueInfo,
301 bool AllowHigherAlign) {
308 DirectSize = ValueInfo.first;
309 DirectAlign = ValueInfo.second;
315 DirectTy = DirectTy->getPointerTo(0);
318 DirectSize, DirectAlign,
331 Address Addr1, llvm::BasicBlock *Block1,
332 Address Addr2, llvm::BasicBlock *Block2,
333 const llvm::Twine &
Name =
"") {
393 if (AT->getSize() == 0)
395 FT = AT->getElementType();
406 if (isa<CXXRecordDecl>(RT->
getDecl()))
424 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
425 for (
const auto &
I : CXXRD->bases())
429 for (
const auto *
I : RD->
fields())
452 const Type *Found =
nullptr;
455 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
456 for (
const auto &
I : CXXRD->bases()) {
474 for (
const auto *FD : RD->
fields()) {
488 if (AT->getSize().getZExtValue() != 1)
490 FT = AT->getElementType();
526 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
529 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
538 return Address(Addr, TyAlignForABI);
541 "Unexpected ArgInfo Kind in generic VAArg emitter!");
544 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
546 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
548 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
550 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
563 class DefaultABIInfo :
public ABIInfo {
598 return getNaturalAlignIndirect(Ty);
603 Ty = EnumTy->getDecl()->getIntegerType();
614 return getNaturalAlignIndirect(RetTy);
618 RetTy = EnumTy->getDecl()->getIntegerType();
630 class WebAssemblyABIInfo final :
public DefaultABIInfo {
633 : DefaultABIInfo(CGT) {}
705 getContext().getTypeInfoInChars(Ty),
717 class PNaClABIInfo :
public ABIInfo {
759 return getNaturalAlignIndirect(Ty);
762 Ty = EnumTy->getDecl()->getIntegerType();
778 return getNaturalAlignIndirect(RetTy);
782 RetTy = EnumTy->getDecl()->getIntegerType();
791 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
792 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
793 IRType->getScalarSizeInBits() != 64;
797 StringRef Constraint,
799 if ((Constraint ==
"y" || Constraint ==
"&y") && Ty->isVectorTy()) {
800 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
816 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
822 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
830 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
831 return NumMembers <= 4;
840 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
844 unsigned FreeSSERegs;
854 static const unsigned MinABIStackAlignInBytes = 4;
856 bool IsDarwinVectorABI;
857 bool IsRetSmallStructInRegABI;
858 bool IsWin32StructABI;
861 unsigned DefaultNumRegisterParameters;
863 static bool isRegisterSize(
unsigned Size) {
864 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
867 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override {
869 return isX86VectorTypeForVectorCall(getContext(), Ty);
872 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
873 uint64_t NumMembers)
const override {
875 return isX86VectorCallAggregateSmallEnough(NumMembers);
887 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
896 bool shouldAggregateUseDirect(
QualType Ty, CCState &
State,
bool &InReg,
897 bool &NeedsPadding)
const;
898 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &
State)
const;
900 bool canExpandIndirectArgument(
QualType Ty)
const;
917 bool RetSmallStructInRegABI,
bool Win32StructABI,
918 unsigned NumRegisterParameters,
bool SoftFloatABI)
919 :
SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
920 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
921 IsWin32StructABI(Win32StructABI),
922 IsSoftFloatABI(SoftFloatABI),
923 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
924 DefaultNumRegisterParameters(NumRegisterParameters) {}
926 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
928 bool asReturnValue)
const override {
940 bool RetSmallStructInRegABI,
bool Win32StructABI,
941 unsigned NumRegisterParameters,
bool SoftFloatABI)
943 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
944 NumRegisterParameters, SoftFloatABI)) {}
946 static bool isStructReturnInRegABI(
949 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
962 StringRef Constraint,
964 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
968 std::string &Constraints,
969 std::vector<llvm::Type *> &ResultRegTypes,
970 std::vector<llvm::Type *> &ResultTruncRegTypes,
971 std::vector<LValue> &ResultRegDests,
972 std::string &AsmString,
973 unsigned NumOutputs)
const override;
977 unsigned Sig = (0xeb << 0) |
981 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
984 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
985 return "movl\t%ebp, %ebp"
986 "\t\t## marker for objc_retainAutoreleaseReturnValue";
1001 unsigned NumNewOuts,
1002 std::string &AsmString) {
1004 llvm::raw_string_ostream OS(Buf);
1006 while (Pos < AsmString.size()) {
1007 size_t DollarStart = AsmString.find(
'$', Pos);
1008 if (DollarStart == std::string::npos)
1009 DollarStart = AsmString.size();
1010 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
1011 if (DollarEnd == std::string::npos)
1012 DollarEnd = AsmString.size();
1013 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1015 size_t NumDollars = DollarEnd - DollarStart;
1016 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1018 size_t DigitStart = Pos;
1019 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
1020 if (DigitEnd == std::string::npos)
1021 DigitEnd = AsmString.size();
1022 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1023 unsigned OperandIndex;
1024 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1025 if (OperandIndex >= FirstIn)
1026 OperandIndex += NumNewOuts;
1034 AsmString = std::move(OS.str());
1038 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1040 std::vector<llvm::Type *> &ResultRegTypes,
1041 std::vector<llvm::Type *> &ResultTruncRegTypes,
1042 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1043 unsigned NumOutputs)
const {
1048 if (!Constraints.empty())
1050 if (RetWidth <= 32) {
1051 Constraints +=
"={eax}";
1052 ResultRegTypes.push_back(CGF.
Int32Ty);
1055 Constraints +=
"=A";
1056 ResultRegTypes.push_back(CGF.
Int64Ty);
1061 ResultTruncRegTypes.push_back(CoerceTy);
1065 CoerceTy->getPointerTo()));
1066 ResultRegDests.push_back(ReturnSlot);
1073 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1079 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1085 if (Size == 64 || Size == 128)
1100 return shouldReturnTypeInRegister(AT->getElementType(),
Context);
1104 if (!RT)
return false;
1116 if (!shouldReturnTypeInRegister(FD->getType(),
Context))
1125 Ty = CTy->getElementType();
1135 return Size == 32 || Size == 64;
1142 bool X86_32ABIInfo::canExpandIndirectArgument(
QualType Ty)
const {
1148 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1149 if (!IsWin32StructABI ) {
1152 if (!CXXRD->isCLike())
1156 if (CXXRD->isDynamicClass())
1168 for (
const auto *FD : RD->
fields()) {
1178 if (FD->isBitField())
1181 Size += getContext().getTypeSize(FD->getType());
1185 return Size == getContext().getTypeSize(Ty);
1191 if (State.FreeRegs) {
1194 return getNaturalAlignIndirectInReg(RetTy);
1196 return getNaturalAlignIndirect(RetTy,
false);
1200 CCState &State)
const {
1205 uint64_t NumElts = 0;
1206 if (State.CC == llvm::CallingConv::X86_VectorCall &&
1207 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1214 if (IsDarwinVectorABI) {
1215 uint64_t Size = getContext().getTypeSize(RetTy);
1222 llvm::Type::getInt64Ty(getVMContext()), 2));
1226 if ((Size == 8 || Size == 16 || Size == 32) ||
1227 (Size == 64 && VT->getNumElements() == 1))
1231 return getIndirectReturnResult(RetTy, State);
1241 return getIndirectReturnResult(RetTy, State);
1246 return getIndirectReturnResult(RetTy, State);
1254 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1255 uint64_t Size = getContext().getTypeSize(RetTy);
1263 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1264 || SeltTy->hasPointerRepresentation())
1272 return getIndirectReturnResult(RetTy, State);
1277 RetTy = EnumTy->getDecl()->getIntegerType();
1294 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1295 for (
const auto &
I : CXXRD->bases())
1299 for (
const auto *i : RD->
fields()) {
1312 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1313 unsigned Align)
const {
1316 if (Align <= MinABIStackAlignInBytes)
1320 if (!IsDarwinVectorABI) {
1322 return MinABIStackAlignInBytes;
1330 return MinABIStackAlignInBytes;
1334 CCState &State)
const {
1336 if (State.FreeRegs) {
1339 return getNaturalAlignIndirectInReg(Ty);
1341 return getNaturalAlignIndirect(Ty,
false);
1345 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1346 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1347 if (StackAlign == 0)
1352 bool Realign = TypeAlign > StackAlign;
1357 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1364 if (K == BuiltinType::Float || K == BuiltinType::Double)
1370 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1371 if (!IsSoftFloatABI) {
1377 unsigned Size = getContext().getTypeSize(Ty);
1378 unsigned SizeInRegs = (Size + 31) / 32;
1380 if (SizeInRegs == 0)
1384 if (SizeInRegs > State.FreeRegs) {
1393 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1397 State.FreeRegs -= SizeInRegs;
1401 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1403 bool &NeedsPadding)
const {
1410 NeedsPadding =
false;
1413 if (!updateFreeRegs(Ty, State))
1419 if (State.CC == llvm::CallingConv::X86_FastCall ||
1420 State.CC == llvm::CallingConv::X86_VectorCall) {
1421 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1422 NeedsPadding =
true;
1430 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1431 if (!updateFreeRegs(Ty, State))
1437 if (State.CC == llvm::CallingConv::X86_FastCall ||
1438 State.CC == llvm::CallingConv::X86_VectorCall) {
1439 if (getContext().getTypeSize(Ty) > 32)
1450 CCState &State)
const {
1460 return getIndirectResult(Ty,
false, State);
1469 const Type *Base =
nullptr;
1470 uint64_t NumElts = 0;
1471 if (State.CC == llvm::CallingConv::X86_VectorCall &&
1472 isHomogeneousAggregate(Ty, Base, NumElts)) {
1473 if (State.FreeSSERegs >= NumElts) {
1474 State.FreeSSERegs -= NumElts;
1479 return getIndirectResult(Ty,
false, State);
1486 return getIndirectResult(Ty,
true, State);
1489 if (!IsWin32StructABI &&
isEmptyRecord(getContext(), Ty,
true))
1492 llvm::LLVMContext &LLVMContext = getVMContext();
1493 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1494 bool NeedsPadding =
false;
1496 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1497 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
1505 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1513 if (getContext().getTypeSize(Ty) <= 4 * 32 &&
1514 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1516 State.CC == llvm::CallingConv::X86_FastCall ||
1517 State.CC == llvm::CallingConv::X86_VectorCall,
1520 return getIndirectResult(Ty,
true, State);
1526 if (IsDarwinVectorABI) {
1527 uint64_t Size = getContext().getTypeSize(Ty);
1528 if ((Size == 8 || Size == 16 || Size == 32) ||
1529 (Size == 64 && VT->getNumElements() == 1))
1534 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1542 Ty = EnumTy->getDecl()->getIntegerType();
1544 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1561 else if (State.CC == llvm::CallingConv::X86_FastCall)
1563 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1565 State.FreeSSERegs = 6;
1569 State.FreeRegs = DefaultNumRegisterParameters;
1576 if (State.FreeRegs) {
1587 bool UsedInAlloca =
false;
1596 rewriteWithInAlloca(FI);
1606 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1608 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1609 StackOffset += getContext().getTypeSizeInChars(Type);
1613 StackOffset = FieldEnd.
alignTo(FieldAlign);
1614 if (StackOffset != FieldEnd) {
1615 CharUnits NumBytes = StackOffset - FieldEnd;
1616 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1617 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1618 FrameFields.push_back(Ty);
1643 llvm_unreachable(
"invalid enum");
1646 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1647 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1664 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1671 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1681 for (; I !=
E; ++
I) {
1683 addFieldToArgStruct(FrameFields, StackOffset, I->
info, I->
type);
1686 FI.
setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1694 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1701 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1708 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1710 assert(Triple.getArch() == llvm::Triple::x86);
1712 switch (Opts.getStructReturnConvention()) {
1721 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1724 switch (Triple.getOS()) {
1725 case llvm::Triple::DragonFly:
1726 case llvm::Triple::FreeBSD:
1727 case llvm::Triple::OpenBSD:
1728 case llvm::Triple::Bitrig:
1729 case llvm::Triple::Win32:
1736 void X86_32TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
1737 llvm::GlobalValue *GV,
1739 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1740 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1742 llvm::Function *Fn = cast<llvm::Function>(GV);
1745 llvm::AttrBuilder B;
1746 B.addStackAlignmentAttr(16);
1747 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1749 llvm::AttributeSet::FunctionIndex,
1752 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1753 llvm::Function *Fn = cast<llvm::Function>(GV);
1754 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1759 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1782 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
1809 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
1811 case X86AVXABILevel::AVX512:
1813 case X86AVXABILevel::AVX:
1818 llvm_unreachable(
"Unknown AVXLevel");
1843 static Class merge(Class Accum, Class Field);
1859 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
1885 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1886 bool isNamedArg)
const;
1890 unsigned IROffset,
QualType SourceTy,
1891 unsigned SourceOffset)
const;
1893 unsigned IROffset,
QualType SourceTy,
1894 unsigned SourceOffset)
const;
1910 unsigned freeIntRegs,
1911 unsigned &neededInt,
1912 unsigned &neededSSE,
1913 bool isNamedArg)
const;
1915 bool IsIllegalVectorType(
QualType Ty)
const;
1922 bool honorsRevision0_98()
const {
1923 return !getTarget().getTriple().isOSDarwin();
1928 bool classifyIntegerMMXAsSSE()
const {
1929 const llvm::Triple &Triple = getTarget().getTriple();
1930 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
1932 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
1940 bool Has64BitPointers;
1945 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1949 unsigned neededInt, neededSSE;
1955 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1956 return (vectorTy->getBitWidth() > 128);
1968 bool has64BitPointers()
const {
1969 return Has64BitPointers;
1972 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
1974 bool asReturnValue)
const override {
1980 class WinX86_64ABIInfo :
public ABIInfo {
1984 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1991 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override {
1993 return isX86VectorTypeForVectorCall(getContext(), Ty);
1996 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
1997 uint64_t NumMembers)
const override {
1999 return isX86VectorCallAggregateSmallEnough(NumMembers);
2004 bool IsReturnType)
const;
2014 const X86_64ABIInfo &getABIInfo()
const {
2033 StringRef Constraint,
2035 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2038 bool isNoProtoCallVariadic(
const CallArgList &args,
2047 bool HasAVXType =
false;
2048 for (CallArgList::const_iterator
2049 it = args.begin(), ie = args.end(); it != ie; ++it) {
2050 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2066 if (getABIInfo().has64BitPointers())
2076 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
2079 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2081 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2082 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2083 llvm::Function *Fn = cast<llvm::Function>(GV);
2084 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2090 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
2093 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2095 void getDependentLibraryOption(llvm::StringRef Lib,
2099 if (Lib.find(
" ") != StringRef::npos)
2100 Opt +=
"\"" + Lib.str() +
"\"";
2106 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2110 bool Quote = (Lib.find(
" ") != StringRef::npos);
2111 std::string ArgStr = Quote ?
"\"" :
"";
2113 if (!Lib.endswith_lower(
".lib"))
2115 ArgStr += Quote ?
"\"" :
"";
2119 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
2122 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
2123 unsigned NumRegisterParameters)
2124 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2125 Win32StructABI, NumRegisterParameters,
false) {}
2127 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2130 void getDependentLibraryOption(llvm::StringRef Lib,
2132 Opt =
"/DEFAULTLIB:";
2133 Opt += qualifyWindowsLibrary(Lib);
2136 void getDetectMismatchOption(llvm::StringRef
Name,
2137 llvm::StringRef
Value,
2139 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2143 static void addStackProbeSizeTargetAttribute(
const Decl *D,
2144 llvm::GlobalValue *GV,
2146 if (D && isa<FunctionDecl>(D)) {
2148 llvm::Function *Fn = cast<llvm::Function>(GV);
2150 Fn->addFnAttr(
"stack-probe-size",
2156 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
2157 llvm::GlobalValue *GV,
2159 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2161 addStackProbeSizeTargetAttribute(D, GV, CGM);
2170 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2187 void getDependentLibraryOption(llvm::StringRef Lib,
2189 Opt =
"/DEFAULTLIB:";
2190 Opt += qualifyWindowsLibrary(Lib);
2193 void getDetectMismatchOption(llvm::StringRef Name,
2194 llvm::StringRef Value,
2196 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2200 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
2201 llvm::GlobalValue *GV,
2205 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2206 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2207 llvm::Function *Fn = cast<llvm::Function>(GV);
2208 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2212 addStackProbeSizeTargetAttribute(D, GV, CGM);
2216 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2241 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2243 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2245 if (Hi == SSEUp && Lo != SSE)
2249 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2273 assert((Accum != Memory && Accum != ComplexX87) &&
2274 "Invalid accumulated classification during merge.");
2275 if (Accum == Field || Field == NoClass)
2277 if (Field == Memory)
2279 if (Accum == NoClass)
2281 if (Accum == Integer || Field == Integer)
2283 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2284 Accum == X87 || Accum == X87Up)
2289 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2290 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2307 if (k == BuiltinType::Void) {
2309 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2312 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2314 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2316 }
else if (k == BuiltinType::LongDouble) {
2317 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2318 if (LDF == &llvm::APFloat::IEEEquad) {
2321 }
else if (LDF == &llvm::APFloat::x87DoubleExtended) {
2324 }
else if (LDF == &llvm::APFloat::IEEEdouble) {
2327 llvm_unreachable(
"unexpected long double representation!");
2336 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2347 if (Has64BitPointers) {
2354 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2355 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2356 if (EB_FuncPtr != EB_ThisAdj) {
2369 uint64_t Size = getContext().getTypeSize(VT);
2370 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2379 uint64_t EB_Lo = (OffsetBase) / 64;
2380 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2383 }
else if (Size == 64) {
2384 QualType ElementType = VT->getElementType();
2393 if (!classifyIntegerMMXAsSSE() &&
2404 if (OffsetBase && OffsetBase != 64)
2406 }
else if (Size == 128 ||
2407 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2431 uint64_t Size = getContext().getTypeSize(Ty);
2435 else if (Size <= 128)
2437 }
else if (ET == getContext().FloatTy) {
2439 }
else if (ET == getContext().DoubleTy) {
2441 }
else if (ET == getContext().LongDoubleTy) {
2442 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2443 if (LDF == &llvm::APFloat::IEEEquad)
2445 else if (LDF == &llvm::APFloat::x87DoubleExtended)
2446 Current = ComplexX87;
2447 else if (LDF == &llvm::APFloat::IEEEdouble)
2450 llvm_unreachable(
"unexpected long double representation!");
2455 uint64_t EB_Real = (OffsetBase) / 64;
2456 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2457 if (Hi == NoClass && EB_Real != EB_Imag)
2466 uint64_t Size = getContext().getTypeSize(Ty);
2477 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2483 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2484 uint64_t ArraySize = AT->getSize().getZExtValue();
2489 if (Size > 128 && EltSize != 256)
2492 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2493 Class FieldLo, FieldHi;
2494 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2495 Lo = merge(Lo, FieldLo);
2496 Hi = merge(Hi, FieldHi);
2497 if (Lo == Memory || Hi == Memory)
2501 postMerge(Size, Lo, Hi);
2502 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2507 uint64_t Size = getContext().getTypeSize(Ty);
2532 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2533 for (
const auto &I : CXXRD->bases()) {
2534 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2535 "Unexpected base class!");
2537 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2544 Class FieldLo, FieldHi;
2547 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2548 Lo = merge(Lo, FieldLo);
2549 Hi = merge(Hi, FieldHi);
2550 if (Lo == Memory || Hi == Memory) {
2551 postMerge(Size, Lo, Hi);
2560 i != e; ++i, ++idx) {
2562 bool BitField = i->isBitField();
2571 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
2573 postMerge(Size, Lo, Hi);
2577 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2579 postMerge(Size, Lo, Hi);
2589 Class FieldLo, FieldHi;
2596 if (i->isUnnamedBitfield())
2600 uint64_t Size = i->getBitWidthValue(getContext());
2602 uint64_t EB_Lo = Offset / 64;
2603 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2606 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2611 FieldHi = EB_Hi ? Integer : NoClass;
2614 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2615 Lo = merge(Lo, FieldLo);
2616 Hi = merge(Hi, FieldHi);
2617 if (Lo == Memory || Hi == Memory)
2621 postMerge(Size, Lo, Hi);
2631 Ty = EnumTy->getDecl()->getIntegerType();
2637 return getNaturalAlignIndirect(Ty);
2640 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2642 uint64_t Size = getContext().getTypeSize(VecTy);
2643 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2644 if (Size <= 64 || Size > LargestVector)
2652 unsigned freeIntRegs)
const {
2664 Ty = EnumTy->getDecl()->getIntegerType();
2675 unsigned Align =
std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2698 if (freeIntRegs == 0) {
2699 uint64_t Size = getContext().getTypeSize(Ty);
2703 if (Align == 8 && Size <= 64)
2720 if (isa<llvm::VectorType>(IRType) ||
2721 IRType->getTypeID() == llvm::Type::FP128TyID)
2725 uint64_t Size = getContext().getTypeSize(Ty);
2726 assert((Size == 128 || Size == 256) &&
"Invalid type found!");
2729 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2746 if (TySize <= StartBit)
2751 unsigned NumElts = (
unsigned)AT->getSize().getZExtValue();
2754 for (
unsigned i = 0; i != NumElts; ++i) {
2756 unsigned EltOffset = i*EltSize;
2757 if (EltOffset >= EndBit)
break;
2759 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2773 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2774 for (
const auto &I : CXXRD->bases()) {
2775 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2776 "Unexpected base class!");
2778 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2782 if (BaseOffset >= EndBit)
continue;
2784 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2797 i != e; ++i, ++idx) {
2801 if (FieldOffset >= EndBit)
break;
2803 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2822 const llvm::DataLayout &TD) {
2824 if (IROffset == 0 && IRType->isFloatTy())
2828 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2829 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2830 unsigned Elt = SL->getElementContainingOffset(IROffset);
2831 IROffset -= SL->getElementOffset(Elt);
2836 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2838 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2839 IROffset -= IROffset/EltSize*EltSize;
2850 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
2851 QualType SourceTy,
unsigned SourceOffset)
const {
2856 SourceOffset*8+64, getContext()))
2857 return llvm::Type::getFloatTy(getVMContext());
2864 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2866 return llvm::Type::getDoubleTy(getVMContext());
2885 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
2886 QualType SourceTy,
unsigned SourceOffset)
const {
2889 if (IROffset == 0) {
2891 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2892 IRType->isIntegerTy(64))
2901 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2902 IRType->isIntegerTy(32) ||
2903 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2904 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2905 cast<llvm::IntegerType>(IRType)->getBitWidth();
2908 SourceOffset*8+64, getContext()))
2913 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2915 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2916 if (IROffset < SL->getSizeInBytes()) {
2917 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2918 IROffset -= SL->getElementOffset(FieldIdx);
2920 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2921 SourceTy, SourceOffset);
2925 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2927 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2928 unsigned EltOffset = IROffset/EltSize*EltSize;
2929 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2935 unsigned TySizeInBytes =
2936 (
unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2938 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
2942 return llvm::IntegerType::get(getVMContext(),
2943 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2954 const llvm::DataLayout &TD) {
2959 unsigned LoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2960 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2961 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2962 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2974 if (Lo->isFloatTy())
2975 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2977 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2978 &&
"Invalid/unknown lo type");
2979 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2983 llvm::StructType *Result = llvm::StructType::get(Lo, Hi,
nullptr);
2987 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2988 "Invalid x86-64 argument pair!");
2996 X86_64ABIInfo::Class Lo, Hi;
2997 classify(RetTy, 0, Lo, Hi,
true);
3000 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3001 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3010 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3011 "Unknown missing lo part");
3016 llvm_unreachable(
"Invalid classification for lo word.");
3021 return getIndirectReturnResult(RetTy);
3026 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3030 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3033 RetTy = EnumTy->getDecl()->getIntegerType();
3044 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3050 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3057 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
3058 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3059 llvm::Type::getX86_FP80Ty(getVMContext()),
3070 llvm_unreachable(
"Invalid classification for hi word.");
3077 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3082 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3093 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
3094 ResType = GetByteVectorType(RetTy);
3105 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3122 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
3128 X86_64ABIInfo::Class Lo, Hi;
3129 classify(Ty, 0, Lo, Hi, isNamedArg);
3133 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
3134 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
3145 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3146 "Unknown missing lo part");
3159 return getIndirectResult(Ty, freeIntRegs);
3163 llvm_unreachable(
"Invalid classification for lo word.");
3172 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3176 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3179 Ty = EnumTy->getDecl()->getIntegerType();
3193 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3207 llvm_unreachable(
"Invalid classification for hi word.");
3209 case NoClass:
break;
3214 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3224 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3236 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3237 ResType = GetByteVectorType(Ty);
3256 unsigned freeIntRegs = 6, freeSSERegs = 8;
3272 it != ie; ++it, ++ArgNo) {
3273 bool IsNamedArg = ArgNo < NumRequiredArgs;
3275 unsigned neededInt, neededSSE;
3277 neededSSE, IsNamedArg);
3283 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
3284 freeIntRegs -= neededInt;
3285 freeSSERegs -= neededSSE;
3287 it->info = getIndirectResult(it->type, freeIntRegs);
3313 llvm::PointerType::getUnqual(LTy));
3322 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3323 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3324 "overflow_arg_area.next");
3328 return Address(Res, Align);
3331 Address X86_64ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
3340 unsigned neededInt, neededSSE;
3348 if (!neededInt && !neededSSE)
3364 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
3370 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3371 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3380 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3381 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3382 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3388 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3410 if (neededInt && neededSSE) {
3412 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3416 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3419 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3420 "Unexpected ABI info for mixed regs");
3421 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3422 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3425 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3426 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3439 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3443 }
else if (neededInt) {
3444 RegAddr = Address(CGF.
Builder.CreateGEP(RegSaveArea, gp_offset),
3449 std::pair<CharUnits, CharUnits> SizeAlign =
3450 getContext().getTypeInfoInChars(Ty);
3451 uint64_t TySize = SizeAlign.first.getQuantity();
3462 }
else if (neededSSE == 1) {
3463 RegAddr = Address(CGF.
Builder.CreateGEP(RegSaveArea, fp_offset),
3467 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3474 Address RegAddrLo = Address(CGF.
Builder.CreateGEP(RegSaveArea, fp_offset),
3480 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy,
nullptr);
3519 Address ResAddr =
emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3524 Address X86_64ABIInfo::EmitMSVAArg(
CodeGenFunction &CGF, Address VAListAddr,
3533 bool IsReturnType)
const {
3539 Ty = EnumTy->getDecl()->getIntegerType();
3541 TypeInfo Info = getContext().getTypeInfo(Ty);
3542 uint64_t Width = Info.
Width;
3547 if (!IsReturnType) {
3553 return getNaturalAlignIndirect(Ty,
false);
3559 const Type *Base =
nullptr;
3560 uint64_t NumElts = 0;
3561 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3562 if (FreeSSERegs >= NumElts) {
3563 FreeSSERegs -= NumElts;
3576 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3583 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3584 return getNaturalAlignIndirect(Ty,
false);
3593 if (BT && BT->
getKind() == BuiltinType::Bool)
3598 if (IsMingw64 && BT && BT->
getKind() == BuiltinType::LongDouble) {
3599 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3600 if (LDF == &llvm::APFloat::x87DoubleExtended)
3612 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3617 FreeSSERegs = IsVectorCall ? 6 : 0;
3619 I.
info = classify(I.
type, FreeSSERegs,
false);
3622 Address WinX86_64ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
3633 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
3634 bool IsSoftFloatABI;
3637 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3645 PPC32TargetCodeGenInfo(
CodeGenTypes &CGT,
bool SoftFloatABI)
3661 Address PPC32_SVR4_ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAList,
3663 const unsigned OverflowLimit = 8;
3678 bool isI64 = Ty->
isIntegerType() && getContext().getTypeSize(Ty) == 64;
3681 bool isF64 = Ty->
isFloatingType() && getContext().getTypeSize(Ty) == 64;
3691 if (isInt || IsSoftFloatABI) {
3700 if (isI64 || (isF64 && IsSoftFloatABI)) {
3701 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
3702 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
3706 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
3712 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3715 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
3722 Address RegSaveAreaPtr =
3724 RegAddr = Address(Builder.
CreateLoad(RegSaveAreaPtr),
3729 if (!(isInt || IsSoftFloatABI)) {
3738 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
3739 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.
Int8Ty,
3746 Builder.CreateAdd(NumRegs,
3747 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
3758 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
3766 Size =
TypeInfo.first.alignTo(OverflowAreaAlign);
3771 Address OverflowAreaAddr =
3773 Address OverflowArea(Builder.
CreateLoad(OverflowAreaAddr,
"argp.cur"),
3777 if (Align > OverflowAreaAlign) {
3787 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
3794 Address Result =
emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
3799 Result = Address(Builder.
CreateLoad(Result,
"aggr"),
3800 getContext().getTypeAlignInChars(Ty));
3814 llvm::IntegerType *i8 = CGF.
Int8Ty;
3815 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3816 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3817 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3851 class PPC64_SVR4_ABIInfo :
public ABIInfo {
3859 static const unsigned GPRBits = 64;
3865 bool IsQPXVectorTy(
const Type *Ty)
const {
3870 unsigned NumElements = VT->getNumElements();
3871 if (NumElements == 1)
3874 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
3875 if (getContext().getTypeSize(Ty) <= 256)
3877 }
else if (VT->getElementType()->
3878 isSpecificBuiltinType(BuiltinType::Float)) {
3879 if (getContext().getTypeSize(Ty) <= 128)
3887 bool IsQPXVectorTy(
QualType Ty)
const {
3893 :
ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
3895 bool isPromotableTypeForABI(
QualType Ty)
const;
3901 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override;
3902 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
3903 uint64_t Members)
const override;
3921 if (IsQPXVectorTy(T) ||
3922 (T->
isVectorType() && getContext().getTypeSize(T) == 128) ||
3941 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX)
3953 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
3955 PPC64TargetCodeGenInfo(
CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3971 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
3974 Ty = EnumTy->getDecl()->getIntegerType();
3984 case BuiltinType::Int:
3985 case BuiltinType::UInt:
3999 Ty = CTy->getElementType();
4003 if (IsQPXVectorTy(Ty)) {
4004 if (getContext().getTypeSize(Ty) > 128)
4014 const Type *AlignAsType =
nullptr;
4018 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
4019 getContext().getTypeSize(EltType) == 128) ||
4021 AlignAsType = EltType;
4025 const Type *Base =
nullptr;
4026 uint64_t Members = 0;
4027 if (!AlignAsType &&
Kind == ELFv2 &&
4032 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4033 if (getContext().getTypeSize(AlignAsType) > 128)
4037 }
else if (AlignAsType) {
4044 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4056 uint64_t &Members)
const {
4058 uint64_t NElements = AT->getSize().getZExtValue();
4063 Members *= NElements;
4072 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4073 for (
const auto &I : CXXRD->bases()) {
4078 uint64_t FldMembers;
4082 Members += FldMembers;
4086 for (
const auto *FD : RD->
fields()) {
4091 if (AT->getSize().getZExtValue() == 0)
4093 FT = AT->getElementType();
4100 FD->isBitField() && FD->getBitWidthValue(
getContext()) == 0)
4103 uint64_t FldMembers;
4108 std::max(Members, FldMembers) : Members + FldMembers);
4122 Ty = CT->getElementType();
4138 QualType EltTy = VT->getElementType();
4139 unsigned NumElements =
4154 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4158 if (BT->
getKind() == BuiltinType::Float ||
4159 BT->
getKind() == BuiltinType::Double ||
4160 BT->
getKind() == BuiltinType::LongDouble)
4164 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4170 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4171 const Type *Base, uint64_t Members)
const {
4175 Base->
isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4178 return Members * NumRegs <= 8;
4191 uint64_t Size = getContext().getTypeSize(Ty);
4193 return getNaturalAlignIndirect(Ty,
false);
4194 else if (Size < 128) {
4195 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4204 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4205 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).
getQuantity();
4208 const Type *Base =
nullptr;
4209 uint64_t Members = 0;
4210 if (
Kind == ELFv2 &&
4211 isHomogeneousAggregate(Ty, Base, Members)) {
4213 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4221 uint64_t Bits = getContext().getTypeSize(Ty);
4222 if (Bits > 0 && Bits <= 8 * GPRBits) {
4227 if (Bits <= GPRBits)
4229 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4233 uint64_t RegBits = ABIAlign * 8;
4234 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4235 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4236 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4245 TyAlign > ABIAlign);
4248 return (isPromotableTypeForABI(Ty) ?
4263 uint64_t Size = getContext().getTypeSize(RetTy);
4265 return getNaturalAlignIndirect(RetTy);
4266 else if (Size < 128) {
4267 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4274 const Type *Base =
nullptr;
4275 uint64_t Members = 0;
4276 if (
Kind == ELFv2 &&
4277 isHomogeneousAggregate(RetTy, Base, Members)) {
4279 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4284 uint64_t Bits = getContext().getTypeSize(RetTy);
4285 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4290 if (Bits > GPRBits) {
4291 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4292 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy,
nullptr);
4295 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4300 return getNaturalAlignIndirect(RetTy);
4303 return (isPromotableTypeForABI(RetTy) ?
4308 Address PPC64_SVR4_ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
4310 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4311 TypeInfo.second = getParamTypeAlignment(Ty);
4323 if (EltSize < SlotSize) {
4325 SlotSize * 2, SlotSize,
4328 Address RealAddr = Addr;
4329 Address ImagAddr = RealAddr;
4332 SlotSize - EltSize);
4334 2 * SlotSize - EltSize);
4365 llvm::IntegerType *i8 = CGF.
Int8Ty;
4366 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4367 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4368 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4399 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4434 ABIKind getABIKind()
const {
return Kind; }
4435 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4439 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override;
4440 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
4441 uint64_t Members)
const override;
4443 bool isIllegalVectorType(
QualType Ty)
const;
4453 Address EmitDarwinVAArg(Address VAListAddr,
QualType Ty,
4456 Address EmitAAPCSVAArg(Address VAListAddr,
QualType Ty,
4461 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4462 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4465 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
4467 bool asReturnValue)
const override {
4477 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4478 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
4485 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
4493 if (isIllegalVectorType(Ty)) {
4494 uint64_t Size = getContext().getTypeSize(Ty);
4496 if (isAndroid() && (Size <= 16)) {
4497 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4501 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4506 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4511 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4514 return getNaturalAlignIndirect(Ty,
false);
4520 Ty = EnumTy->getDecl()->getIntegerType();
4530 return getNaturalAlignIndirect(Ty, RAA ==
4537 if (!getContext().getLangOpts().
CPlusPlus || isDarwinPCS())
4544 const Type *Base =
nullptr;
4545 uint64_t Members = 0;
4546 if (isHomogeneousAggregate(Ty, Base, Members)) {
4548 llvm::ArrayType::get(CGT.ConvertType(
QualType(Base, 0)), Members));
4552 uint64_t Size = getContext().getTypeSize(Ty);
4554 unsigned Alignment = getContext().getTypeAlign(Ty);
4555 Size = 64 * ((Size + 63) / 64);
4559 if (Alignment < 128 && Size == 128) {
4560 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4566 return getNaturalAlignIndirect(Ty,
false);
4574 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 128)
4575 return getNaturalAlignIndirect(RetTy);
4580 RetTy = EnumTy->getDecl()->getIntegerType();
4590 const Type *Base =
nullptr;
4591 uint64_t Members = 0;
4592 if (isHomogeneousAggregate(RetTy, Base, Members))
4597 uint64_t Size = getContext().getTypeSize(RetTy);
4599 unsigned Alignment = getContext().getTypeAlign(RetTy);
4600 Size = 64 * ((Size + 63) / 64);
4604 if (Alignment < 128 && Size == 128) {
4605 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4611 return getNaturalAlignIndirect(RetTy);
4615 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
4618 unsigned NumElements = VT->getNumElements();
4619 uint64_t Size = getContext().getTypeSize(VT);
4621 if (!llvm::isPowerOf2_32(NumElements))
4623 return Size != 64 && (Size != 128 || NumElements == 1);
4628 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4637 unsigned VecSize = getContext().getTypeSize(VT);
4638 if (VecSize == 64 || VecSize == 128)
4644 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
4645 uint64_t Members)
const {
4646 return Members <= 4;
4649 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
4657 BaseTy = llvm::PointerType::getUnqual(BaseTy);
4661 unsigned NumRegs = 1;
4662 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4663 BaseTy = ArrTy->getElementType();
4664 NumRegs = ArrTy->getNumElements();
4666 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4684 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4691 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
4700 RegSize = llvm::alignTo(RegSize, 8);
4709 RegSize = 16 * NumRegs;
4721 UsingStack = CGF.
Builder.CreateICmpSGE(
4722 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
4724 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4733 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
4736 reg_offs = CGF.
Builder.CreateAdd(
4737 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
4739 reg_offs = CGF.
Builder.CreateAnd(
4740 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
4749 NewOffset = CGF.
Builder.CreateAdd(
4750 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
4756 InRegs = CGF.
Builder.CreateICmpSLE(
4757 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
4759 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4771 reg_top_offset,
"reg_top_p");
4773 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
4781 MemTy = llvm::PointerType::getUnqual(MemTy);
4784 const Type *Base =
nullptr;
4785 uint64_t NumMembers = 0;
4786 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4787 if (IsHFA && NumMembers > 1) {
4792 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
4793 auto BaseTyInfo = getContext().getTypeInfoInChars(
QualType(Base, 0));
4795 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4797 std::max(TyAlign, BaseTyInfo.second));
4802 BaseTyInfo.first.getQuantity() < 16)
4803 Offset = 16 - BaseTyInfo.first.getQuantity();
4805 for (
unsigned i = 0; i < NumMembers; ++i) {
4823 CharUnits SlotSize = BaseAddr.getAlignment();
4826 TyInfo.first < SlotSize) {
4827 CharUnits Offset = SlotSize - TyInfo.first;
4850 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
4852 OnStackPtr = CGF.
Builder.CreateAdd(
4853 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
4855 OnStackPtr = CGF.
Builder.CreateAnd(
4856 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
4861 Address OnStackAddr(OnStackPtr,
4868 StackSize = StackSlotSize;
4870 StackSize = TyInfo.first.
alignTo(StackSlotSize);
4874 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
4880 TyInfo.first < StackSlotSize) {
4881 CharUnits Offset = StackSlotSize - TyInfo.first;
4894 Address ResAddr =
emitMergePHI(CGF, RegAddr, InRegBlock,
4895 OnStackAddr, OnStackBlock,
"vaargs.addr");
4904 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr,
QualType Ty,
4923 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4927 bool IsIndirect =
false;
4928 if (TyInfo.first.getQuantity() > 16) {
4929 const Type *Base =
nullptr;
4930 uint64_t Members = 0;
4931 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
4935 TyInfo, SlotSize,
true);
4962 bool isEABI()
const {
4963 switch (getTarget().getTriple().getEnvironment()) {
4964 case llvm::Triple::Android:
4965 case llvm::Triple::EABI:
4966 case llvm::Triple::EABIHF:
4967 case llvm::Triple::GNUEABI:
4968 case llvm::Triple::GNUEABIHF:
4969 case llvm::Triple::MuslEABI:
4970 case llvm::Triple::MuslEABIHF:
4977 bool isEABIHF()
const {
4978 switch (getTarget().getTriple().getEnvironment()) {
4979 case llvm::Triple::EABIHF:
4980 case llvm::Triple::GNUEABIHF:
4981 case llvm::Triple::MuslEABIHF:
4988 ABIKind getABIKind()
const {
return Kind; }
4993 bool isIllegalVectorType(
QualType Ty)
const;
4995 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override;
4996 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
4997 uint64_t Members)
const override;
5008 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
5010 bool asReturnValue)
const override {
5017 ARMTargetCodeGenInfo(
CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5020 const ARMABIInfo &getABIInfo()
const {
5028 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
5029 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
5041 unsigned getSizeOfUnwindException()
const override {
5042 if (getABIInfo().isEABI())
return 88;
5046 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5048 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5052 const ARMInterruptAttr *
Attr = FD->getAttr<ARMInterruptAttr>();
5057 switch (Attr->getInterrupt()) {
5058 case ARMInterruptAttr::Generic: Kind =
"";
break;
5059 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
5060 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
5061 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
5062 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
5063 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
5066 llvm::Function *Fn = cast<llvm::Function>(GV);
5068 Fn->addFnAttr(
"interrupt", Kind);
5070 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5071 if (ABI == ARMABIInfo::APCS)
5077 llvm::AttrBuilder B;
5078 B.addStackAlignmentAttr(8);
5079 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
5081 llvm::AttributeSet::FunctionIndex,
5086 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
5088 WindowsARMTargetCodeGenInfo(
CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5089 : ARMTargetCodeGenInfo(CGT, K) {}
5091 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5094 void getDependentLibraryOption(llvm::StringRef Lib,
5096 Opt =
"/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5099 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5101 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
5105 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5107 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5108 addStackProbeSizeTargetAttribute(D, GV, CGM);
5132 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5133 return llvm::CallingConv::ARM_AAPCS_VFP;
5135 return llvm::CallingConv::ARM_AAPCS;
5137 return llvm::CallingConv::ARM_APCS;
5143 switch (getABIKind()) {
5144 case APCS:
return llvm::CallingConv::ARM_APCS;
5145 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
5146 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5147 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
5149 llvm_unreachable(
"bad ABI kind");
5152 void ARMABIInfo::setCCs() {
5158 if (abiCC != getLLVMDefaultCC())
5164 switch (getABIKind()) {
5167 if (abiCC != getLLVMDefaultCC())
5172 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
5178 bool isVariadic)
const {
5186 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5191 if (isIllegalVectorType(Ty)) {
5192 uint64_t Size = getContext().getTypeSize(Ty);
5195 llvm::Type::getInt32Ty(getVMContext());
5200 llvm::Type::getInt32Ty(getVMContext()), 2);
5205 llvm::Type::getInt32Ty(getVMContext()), 4);
5208 return getNaturalAlignIndirect(Ty,
false);
5214 if (Ty->
isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5215 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5216 llvm::Type::getFloatTy(getVMContext()) :
5217 llvm::Type::getInt32Ty(getVMContext());
5224 Ty = EnumTy->getDecl()->getIntegerType();
5239 if (IsEffectivelyAAPCS_VFP) {
5242 const Type *Base =
nullptr;
5243 uint64_t Members = 0;
5244 if (isHomogeneousAggregate(Ty, Base, Members)) {
5245 assert(Base &&
"Base class should be set for homogeneous aggregate");
5249 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5253 const Type *Base =
nullptr;
5254 uint64_t Members = 0;
5255 if (isHomogeneousAggregate(Ty, Base, Members)) {
5256 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5258 llvm::ArrayType::get(CGT.ConvertType(
QualType(Base, 0)), Members);
5263 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5276 uint64_t ABIAlign = 4;
5277 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5278 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5279 getABIKind() == ARMABIInfo::AAPCS)
5283 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5286 TyAlign > ABIAlign);
5294 if (getContext().getTypeAlign(Ty) <= 32) {
5295 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5296 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5298 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5299 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5306 llvm::LLVMContext &VMContext) {
5338 if (!RT)
return false;
5349 bool HadField =
false;
5352 i != e; ++i, ++idx) {
5391 bool isVariadic)
const {
5392 bool IsEffectivelyAAPCS_VFP =
5393 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5399 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5400 return getNaturalAlignIndirect(RetTy);
5406 if (RetTy->
isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5407 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5408 llvm::Type::getFloatTy(getVMContext()) :
5409 llvm::Type::getInt32Ty(getVMContext());
5416 RetTy = EnumTy->getDecl()->getIntegerType();
5423 if (getABIKind() == APCS) {
5433 getVMContext(), getContext().getTypeSize(RetTy)));
5438 uint64_t Size = getContext().getTypeSize(RetTy);
5447 return getNaturalAlignIndirect(RetTy);
5456 if (IsEffectivelyAAPCS_VFP) {
5457 const Type *Base =
nullptr;
5458 uint64_t Members = 0;
5459 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5460 assert(Base &&
"Base class should be set for homogeneous aggregate");
5468 uint64_t Size = getContext().getTypeSize(RetTy);
5470 if (getDataLayout().isBigEndian())
5480 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5481 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5483 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
5487 return getNaturalAlignIndirect(RetTy);
5491 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
5499 unsigned NumElements = VT->getNumElements();
5501 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5505 unsigned NumElements = VT->getNumElements();
5506 uint64_t Size = getContext().getTypeSize(VT);
5508 if (!llvm::isPowerOf2_32(NumElements))
5517 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5521 if (BT->
getKind() == BuiltinType::Float ||
5522 BT->
getKind() == BuiltinType::Double ||
5523 BT->
getKind() == BuiltinType::LongDouble)
5526 unsigned VecSize = getContext().getTypeSize(VT);
5527 if (VecSize == 64 || VecSize == 128)
5533 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5534 uint64_t Members)
const {
5535 return Members <= 4;
5538 Address ARMABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
5549 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5550 CharUnits TyAlignForABI = TyInfo.second;
5553 bool IsIndirect =
false;
5554 const Type *Base =
nullptr;
5555 uint64_t Members = 0;
5562 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5563 !isHomogeneousAggregate(Ty, Base, Members)) {
5570 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5571 getABIKind() == ARMABIInfo::AAPCS) {
5574 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5581 TyInfo.second = TyAlignForABI;
5593 class NVPTXABIInfo :
public ABIInfo {
5610 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5615 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
5628 RetTy = EnumTy->getDecl()->getIntegerType();
5637 Ty = EnumTy->getDecl()->getIntegerType();
5641 return getNaturalAlignIndirect(Ty,
true);
5660 Address NVPTXABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
5662 llvm_unreachable(
"NVPTX does not support varargs");
5665 void NVPTXTargetCodeGenInfo::
5666 setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5668 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5671 llvm::Function *F = cast<llvm::Function>(GV);
5677 if (FD->hasAttr<OpenCLKernelAttr>()) {
5680 addNVVMMetadata(F,
"kernel", 1);
5682 F->addFnAttr(llvm::Attribute::NoInline);
5691 if (FD->hasAttr<CUDAGlobalAttr>()) {
5693 addNVVMMetadata(F,
"kernel", 1);
5695 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
5697 llvm::APSInt MaxThreads(32);
5698 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
5700 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
5705 if (Attr->getMinBlocks()) {
5706 llvm::APSInt MinBlocks(32);
5707 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
5710 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
5716 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5718 llvm::Module *M = F->getParent();
5722 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
5724 llvm::Metadata *MDVals[] = {
5725 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5726 llvm::ConstantAsMetadata::get(
5727 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5729 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
5746 bool isPromotableIntegerType(
QualType Ty)
const;
5747 bool isCompoundType(
QualType Ty)
const;
5748 bool isVectorArgumentType(
QualType Ty)
const;
5749 bool isFPArgumentType(
QualType Ty)
const;
5765 bool shouldPassIndirectlyForSwift(
CharUnits totalSize,
5767 bool asReturnValue)
const override {
5774 SystemZTargetCodeGenInfo(
CodeGenTypes &CGT,
bool HasVector)
5780 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
5783 Ty = EnumTy->getDecl()->getIntegerType();
5792 case BuiltinType::Int:
5793 case BuiltinType::UInt:
5801 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
5807 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
5808 return (HasVector &&
5810 getContext().getTypeSize(Ty) <= 128);
5813 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
5816 case BuiltinType::Float:
5817 case BuiltinType::Double:
5832 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5833 for (
const auto &I : CXXRD->bases()) {
5842 Found = GetSingleElementType(Base);
5846 for (
const auto *FD : RD->
fields()) {
5850 if (getContext().getLangOpts().CPlusPlus &&
5851 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5858 Found = GetSingleElementType(FD->
getType());
5870 Address SystemZABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
5884 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5889 bool InFPRs =
false;
5890 bool IsVector =
false;
5894 DirectTy = llvm::PointerType::getUnqual(DirectTy);
5899 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
5900 IsVector = ArgTy->isVectorTy();
5901 UnpaddedSize = TyInfo.first;
5902 DirectAlign = TyInfo.second;
5905 if (IsVector && UnpaddedSize > PaddedSize)
5907 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
5909 CharUnits Padding = (PaddedSize - UnpaddedSize);
5913 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
5919 Address OverflowArgAreaPtr =
5921 "overflow_arg_area_ptr");
5922 Address OverflowArgArea =
5931 "overflow_arg_area");
5939 unsigned MaxRegs, RegCountField, RegSaveIndex;
5950 RegPadding = Padding;
5957 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5964 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5971 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
5973 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
5976 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
5977 Address RegSaveAreaPtr =
5979 "reg_save_area_ptr");
5982 Address RawRegAddr(CGF.
Builder.CreateGEP(RegSaveArea, RegOffset,
5989 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5991 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
6001 Address OverflowArgArea =
6004 Address RawMemAddr =
6012 "overflow_arg_area");
6018 Address ResAddr =
emitMergePHI(CGF, RegAddr, InRegBlock,
6019 MemAddr, InMemBlock,
"va_arg.addr");
6031 if (isVectorArgumentType(RetTy))
6033 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6034 return getNaturalAlignIndirect(RetTy);
6035 return (isPromotableIntegerType(RetTy) ?
6045 if (isPromotableIntegerType(Ty))
6051 uint64_t Size = getContext().getTypeSize(Ty);
6052 QualType SingleElementTy = GetSingleElementType(Ty);
6053 if (isVectorArgumentType(SingleElementTy) &&
6054 getContext().getTypeSize(SingleElementTy) == Size)
6058 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6059 return getNaturalAlignIndirect(Ty,
false);
6067 return getNaturalAlignIndirect(Ty,
false);
6071 if (isFPArgumentType(SingleElementTy)) {
6072 assert(Size == 32 || Size == 64);
6074 PassTy = llvm::Type::getFloatTy(getVMContext());
6076 PassTy = llvm::Type::getDoubleTy(getVMContext());
6078 PassTy = llvm::IntegerType::get(getVMContext(), Size);
6083 if (isCompoundType(Ty))
6084 return getNaturalAlignIndirect(Ty,
false);
6099 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6105 void MSP430TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
6106 llvm::GlobalValue *GV,
6108 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6109 if (
const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6111 llvm::Function *F = cast<llvm::Function>(GV);
6114 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6117 F->addFnAttr(llvm::Attribute::NoInline);
6120 unsigned Num = attr->getNumber() / 2;
6122 "__isr_" + Twine(Num), F);
6133 class MipsABIInfo :
public ABIInfo {
6135 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6136 void CoerceToIntArgs(uint64_t TySize,
6140 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset)
const;
6143 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6144 StackAlignInBytes(IsO32 ? 8 : 16) {}
6151 bool shouldSignExtUnsignedType(
QualType Ty)
const override;
6155 unsigned SizeOfUnwindException;
6159 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6165 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6167 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6169 llvm::Function *Fn = cast<llvm::Function>(GV);
6170 if (FD->hasAttr<Mips16Attr>()) {
6171 Fn->addFnAttr(
"mips16");
6173 else if (FD->hasAttr<NoMips16Attr>()) {
6174 Fn->addFnAttr(
"nomips16");
6177 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
6182 switch (Attr->getInterrupt()) {
6183 case MipsInterruptAttr::eic: Kind =
"eic";
break;
6184 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
6185 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
6186 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
6187 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
6188 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
6189 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
6190 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
6191 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
6194 Fn->addFnAttr(
"interrupt", Kind);
6201 unsigned getSizeOfUnwindException()
const override {
6202 return SizeOfUnwindException;
6207 void MipsABIInfo::CoerceToIntArgs(
6209 llvm::IntegerType *IntTy =
6210 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
6213 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6214 ArgList.push_back(IntTy);
6217 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6220 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6229 CoerceToIntArgs(TySize, ArgList);
6230 return llvm::StructType::get(getVMContext(), ArgList);
6234 return CGT.ConvertType(Ty);
6240 CoerceToIntArgs(TySize, ArgList);
6241 return llvm::StructType::get(getVMContext(), ArgList);
6246 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6248 uint64_t LastOffset = 0;
6250 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6255 i != e; ++i, ++idx) {
6259 if (!BT || BT->
getKind() != BuiltinType::Double)
6267 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6268 ArgList.push_back(I64);
6271 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6272 LastOffset = Offset + 64;
6275 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6276 ArgList.append(IntArgList.begin(), IntArgList.end());
6278 return llvm::StructType::get(getVMContext(), ArgList);
6281 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6282 uint64_t Offset)
const {
6283 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6286 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6293 uint64_t OrigOffset =
Offset;
6294 uint64_t TySize = getContext().getTypeSize(Ty);
6295 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6298 (uint64_t)StackAlignInBytes);
6299 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6300 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6308 Offset = OrigOffset + MinABIStackAlignInBytes;
6317 getPaddingType(OrigOffset, CurrOffset));
6324 Ty = EnumTy->getDecl()->getIntegerType();
6331 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
6335 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
6355 for (; b != e; ++b) {
6361 RTList.push_back(CGT.ConvertType(b->getType()));
6365 return llvm::StructType::get(getVMContext(), RTList,
6366 RD->hasAttr<PackedAttr>());
6372 CoerceToIntArgs(Size, RTList);
6373 return llvm::StructType::get(getVMContext(), RTList);
6377 uint64_t Size = getContext().getTypeSize(RetTy);
6384 if (!IsO32 && Size == 0)
6403 return getNaturalAlignIndirect(RetTy);
6408 RetTy = EnumTy->getDecl()->getIntegerType();
6420 uint64_t Offset = RetInfo.
isIndirect() ? MinABIStackAlignInBytes : 0;
6426 Address MipsABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
6432 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6433 unsigned PtrWidth = getTarget().getPointerWidth(0);
6434 bool DidPromote =
false;
6436 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
6439 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
6443 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6454 TyInfo, ArgSlotSize,
true);
6460 Address Temp = CGF.
CreateMemTemp(OrigTy,
"vaarg.promotion-temp");
6477 bool MipsABIInfo::shouldSignExtUnsignedType(
QualType Ty)
const {
6478 int TySize = getContext().getTypeSize(Ty);
6523 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
6526 : DefaultTargetCodeGenInfo(CGT) {}
6528 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6532 void TCETargetCodeGenInfo::setTargetAttributes(
6534 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6537 llvm::Function *F = cast<llvm::Function>(GV);
6540 if (FD->hasAttr<OpenCLKernelAttr>()) {
6542 F->addFnAttr(llvm::Attribute::NoInline);
6543 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
6546 llvm::LLVMContext &Context = F->getContext();
6547 llvm::NamedMDNode *OpenCLMetadata =
6549 "opencl.kernel_wg_size_info");
6552 Operands.push_back(llvm::ConstantAsMetadata::get(F));
6555 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6556 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
6558 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6559 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
6561 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6562 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
6568 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
6569 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
6583 class HexagonABIInfo :
public ABIInfo {
6623 Ty = EnumTy->getDecl()->getIntegerType();
6636 uint64_t Size = getContext().getTypeSize(Ty);
6638 return getNaturalAlignIndirect(Ty,
true);
6655 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 64)
6656 return getNaturalAlignIndirect(RetTy);
6661 RetTy = EnumTy->getDecl()->getIntegerType();
6672 uint64_t Size = getContext().getTypeSize(RetTy);
6684 return getNaturalAlignIndirect(RetTy,
true);
6687 Address HexagonABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
6691 getContext().getTypeInfoInChars(Ty),
6701 class LanaiABIInfo :
public DefaultABIInfo {
6705 bool shouldUseInReg(
QualType Ty, CCState &State)
const;
6728 bool LanaiABIInfo::shouldUseInReg(
QualType Ty, CCState &State)
const {
6729 unsigned Size = getContext().getTypeSize(Ty);
6730 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
6732 if (SizeInRegs == 0)
6735 if (SizeInRegs > State.FreeRegs) {
6740 State.FreeRegs -= SizeInRegs;
6746 CCState &State)
const {
6748 if (State.FreeRegs) {
6750 return getNaturalAlignIndirectInReg(Ty);
6752 return getNaturalAlignIndirect(Ty,
false);
6756 const unsigned MinABIStackAlignInBytes = 4;
6757 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
6760 MinABIStackAlignInBytes);
6764 CCState &State)
const {
6770 return getIndirectResult(Ty,
false, State);
6772 return getNaturalAlignIndirect(Ty,
true);
6779 return getIndirectResult(Ty,
true, State);
6785 llvm::LLVMContext &LLVMContext = getVMContext();
6786 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
6787 if (SizeInRegs <= State.FreeRegs) {
6788 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
6790 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
6791 State.FreeRegs -= SizeInRegs;
6796 return getIndirectResult(Ty,
true, State);
6801 Ty = EnumTy->getDecl()->getIntegerType();
6803 bool InReg = shouldUseInReg(Ty, State);
6832 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6834 unsigned getOpenCLKernelCallingConv()
const override;
6839 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
6841 llvm::GlobalValue *GV,
6843 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6847 if (
const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6848 llvm::Function *F = cast<llvm::Function>(GV);
6849 uint32_t NumVGPR = Attr->getNumVGPR();
6851 F->addFnAttr(
"amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6854 if (
const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6855 llvm::Function *F = cast<llvm::Function>(GV);
6856 unsigned NumSGPR = Attr->getNumSGPR();
6858 F->addFnAttr(
"amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6863 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
6864 return llvm::CallingConv::AMDGPU_KERNEL;
6874 class SparcV8ABIInfo :
public DefaultABIInfo {
6876 SparcV8ABIInfo(
CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6937 class SparcV9ABIInfo :
public ABIInfo {
6958 struct CoerceBuilder {
6960 const llvm::DataLayout &DL;
6965 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
6966 : Context(c), DL(dl), Size(0), InReg(
false) {}
6969 void pad(uint64_t ToSize) {
6970 assert(ToSize >= Size &&
"Cannot remove elements");
6975 uint64_t Aligned = llvm::alignTo(Size, 64);
6976 if (Aligned > Size && Aligned <= ToSize) {
6977 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
6982 while (Size + 64 <= ToSize) {
6983 Elems.push_back(llvm::Type::getInt64Ty(Context));
6988 if (Size < ToSize) {
6989 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
6995 void addFloat(uint64_t Offset,
llvm::Type *Ty,
unsigned Bits) {
7003 Elems.push_back(Ty);
7004 Size = Offset + Bits;
7008 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7009 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7010 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7011 llvm::Type *ElemTy = StrTy->getElementType(i);
7012 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7013 switch (ElemTy->getTypeID()) {
7014 case llvm::Type::StructTyID:
7015 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
7017 case llvm::Type::FloatTyID:
7018 addFloat(ElemOffset, ElemTy, 32);
7020 case llvm::Type::DoubleTyID:
7021 addFloat(ElemOffset, ElemTy, 64);
7023 case llvm::Type::FP128TyID:
7024 addFloat(ElemOffset, ElemTy, 128);
7026 case llvm::Type::PointerTyID:
7027 if (ElemOffset % 64 == 0) {
7029 Elems.push_back(ElemTy);
7040 bool isUsableType(llvm::StructType *Ty)
const {
7041 return llvm::makeArrayRef(Elems) == Ty->elements();
7046 if (Elems.size() == 1)
7047 return Elems.front();
7049 return llvm::StructType::get(Context, Elems);
7060 uint64_t Size = getContext().getTypeSize(Ty);
7064 if (Size > SizeLimit)
7065 return getNaturalAlignIndirect(Ty,
false);
7069 Ty = EnumTy->getDecl()->getIntegerType();
7072 if (Size < 64 && Ty->isIntegerType())
7086 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
7090 CoerceBuilder CB(getVMContext(), getDataLayout());
7091 CB.addStruct(0, StrTy);
7092 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
7095 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
7103 Address SparcV9ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
7113 Address Addr(Builder.
CreateLoad(VAListAddr,
"ap.cur"), SlotSize);
7114 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
7116 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
7124 llvm_unreachable(
"Unsupported ABI kind for va_arg");
7134 auto AllocSize = getDataLayout().getTypeAllocSize(AI.
getCoerceToType());
7143 ArgAddr = Address(Builder.
CreateLoad(ArgAddr,
"indirect.arg"),
7148 return Address(llvm::UndefValue::get(ArgPtrTy),
TypeInfo.second);
7156 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
7188 llvm::IntegerType *i8 = CGF.
Int8Ty;
7189 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
7190 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
7279 class TypeStringCache {
7280 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
7284 std::string Swapped;
7287 std::map<const IdentifierInfo *, struct Entry>
Map;
7288 unsigned IncompleteCount;
7289 unsigned IncompleteUsedCount;
7291 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
7301 class FieldEncoding {
7305 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
7306 StringRef str() {
return Enc.c_str();}
7307 bool operator<(
const FieldEncoding &rhs)
const {
7308 if (HasName != rhs.HasName)
return HasName;
7309 return Enc < rhs.Enc;
7313 class XCoreABIInfo :
public DefaultABIInfo {
7321 mutable TypeStringCache TSC;
7325 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
7333 Address XCoreABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
7339 Address AP(Builder.
CreateLoad(VAListAddr), SlotSize);
7343 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
7345 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
7346 AI.setCoerceToType(ArgTy);
7347 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
7351 switch (AI.getKind()) {
7355 llvm_unreachable(
"Unsupported ABI kind for va_arg");
7357 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
7364 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
7365 ArgSize = ArgSize.
alignTo(SlotSize);
7369 Val = Address(Builder.
CreateLoad(Val), TypeAlign);
7390 std::string StubEnc) {
7394 assert( (E.Str.empty() || E.State == Recursive) &&
7395 "Incorrectly use of addIncomplete");
7396 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
7397 E.Swapped.swap(E.Str);
7398 E.Str.swap(StubEnc);
7399 E.State = Incomplete;
7407 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
7410 auto I =
Map.find(ID);
7411 assert(I !=
Map.end() &&
"Entry not present");
7412 Entry &E = I->second;
7413 assert( (E.State == Incomplete ||
7414 E.State == IncompleteUsed) &&
7415 "Entry must be an incomplete type");
7416 bool IsRecursive =
false;
7417 if (E.State == IncompleteUsed) {
7420 --IncompleteUsedCount;
7422 if (E.Swapped.empty())
7426 E.Swapped.swap(E.Str);
7428 E.State = Recursive;
7436 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
7438 if (!ID || IncompleteUsedCount)
7441 if (IsRecursive && !E.Str.empty()) {
7442 assert(E.State==Recursive && E.Str.size() == Str.size() &&
7443 "This is not the same Recursive entry");
7449 assert(E.Str.empty() &&
"Entry already present");
7451 E.State = IsRecursive? Recursive : NonRecursive;
7460 auto I =
Map.find(ID);
7463 Entry &E = I->second;
7464 if (E.State == Recursive && IncompleteCount)
7467 if (E.State == Incomplete) {
7469 E.State = IncompleteUsed;
7470 ++IncompleteUsedCount;
7472 return E.Str.c_str();
7491 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
7495 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
7496 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
7497 llvm::MDString::get(Ctx, Enc.str())};
7498 llvm::NamedMDNode *MD =
7499 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
7500 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7513 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
7515 unsigned getOpenCLKernelCallingConv()
const override;
7520 void SPIRTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
7522 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
7523 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
7527 llvm::Metadata *SPIRVerElts[] = {
7528 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 2)),
7529 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 0))};
7530 llvm::NamedMDNode *SPIRVerMD =
7531 M.getOrInsertNamedMetadata(
"opencl.spir.version");
7532 SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
7535 llvm::Metadata *OCLVerElts[] = {
7536 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
7538 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
7539 Int32Ty, (CGM.
getLangOpts().OpenCLVersion % 100) / 10))};
7540 llvm::NamedMDNode *OCLVerMD =
7541 M.getOrInsertNamedMetadata(
"opencl.ocl.version");
7542 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
7545 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv()
const {
7546 return llvm::CallingConv::SPIR_KERNEL;
7551 TypeStringCache &TSC);
7559 TypeStringCache &TSC) {
7560 for (
const auto *Field : RD->
fields()) {
7563 Enc += Field->getName();
7565 if (Field->isBitField()) {
7567 llvm::raw_svector_ostream OS(Enc);
7568 OS << Field->getBitWidthValue(CGM.
getContext());
7571 if (!
appendType(Enc, Field->getType(), CGM, TSC))
7573 if (Field->isBitField())
7576 FE.emplace_back(!Field->getName().empty(), Enc);
7588 StringRef TypeString = TSC.lookupStr(ID);
7589 if (!TypeString.empty()) {
7595 size_t Start = Enc.size();
7603 bool IsRecursive =
false;
7610 std::string StubEnc(Enc.substr(Start).str());
7612 TSC.addIncomplete(ID, std::move(StubEnc));
7614 (void) TSC.removeIncomplete(ID);
7617 IsRecursive = TSC.removeIncomplete(ID);
7621 std::sort(FE.begin(), FE.end());
7623 unsigned E = FE.size();
7624 for (
unsigned I = 0; I !=
E; ++
I) {
7631 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
7637 TypeStringCache &TSC,
7640 StringRef TypeString = TSC.lookupStr(ID);
7641 if (!TypeString.empty()) {
7646 size_t Start = Enc.size();
7655 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I !=
E;
7657 SmallStringEnc EnumEnc;
7659 EnumEnc += I->getName();
7661 I->getInitVal().toString(EnumEnc);
7663 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
7665 std::sort(FE.begin(), FE.end());
7666 unsigned E = FE.size();
7667 for (
unsigned I = 0; I !=
E; ++
I) {
7674 TSC.addIfComplete(ID, Enc.substr(Start),
false);
7682 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
7690 Enc += Table[Lookup];
7695 const char *EncType;
7697 case BuiltinType::Void:
7700 case BuiltinType::Bool:
7703 case BuiltinType::Char_U:
7706 case BuiltinType::UChar:
7709 case BuiltinType::SChar:
7712 case BuiltinType::UShort:
7715 case BuiltinType::Short:
7718 case BuiltinType::UInt:
7721 case BuiltinType::Int:
7724 case BuiltinType::ULong:
7727 case BuiltinType::Long:
7730 case BuiltinType::ULongLong:
7733 case BuiltinType::LongLong:
7736 case BuiltinType::Float:
7739 case BuiltinType::Double:
7742 case BuiltinType::LongDouble:
7755 TypeStringCache &TSC) {
7767 TypeStringCache &TSC, StringRef NoSizeEnc) {
7772 CAT->getSize().toStringUnsigned(Enc);
7788 TypeStringCache &TSC) {
7795 auto I = FPT->param_type_begin();
7796 auto E = FPT->param_type_end();
7805 if (FPT->isVariadic())
7808 if (FPT->isVariadic())
7822 TypeStringCache &TSC) {
7859 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
7865 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
7868 QualType QT = VD->getType().getCanonicalType();
7894 if (TheTargetCodeGenInfo)
7895 return *TheTargetCodeGenInfo;
7899 this->TheTargetCodeGenInfo.reset(
P);
7904 switch (Triple.getArch()) {
7906 return SetCGInfo(
new DefaultTargetCodeGenInfo(Types));
7908 case llvm::Triple::le32:
7909 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
7910 case llvm::Triple::mips:
7911 case llvm::Triple::mipsel:
7912 if (Triple.getOS() == llvm::Triple::NaCl)
7913 return SetCGInfo(
new PNaClTargetCodeGenInfo(Types));
7914 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
true));
7916 case llvm::Triple::mips64:
7917 case llvm::Triple::mips64el:
7918 return SetCGInfo(
new MIPSTargetCodeGenInfo(Types,
false));
7920 case llvm::Triple::aarch64:
7921 case llvm::Triple::aarch64_be: {
7922 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
7923 if (
getTarget().getABI() ==
"darwinpcs")
7924 Kind = AArch64ABIInfo::DarwinPCS;
7926 return SetCGInfo(
new AArch64TargetCodeGenInfo(Types, Kind));
7929 case llvm::Triple::wasm32:
7930 case llvm::Triple::wasm64:
7931 return SetCGInfo(
new WebAssemblyTargetCodeGenInfo(Types));
7933 case llvm::Triple::arm:
7934 case llvm::Triple::armeb:
7935 case llvm::Triple::thumb:
7936 case llvm::Triple::thumbeb: {
7937 if (Triple.getOS() == llvm::Triple::Win32) {
7939 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
7942 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
7944 if (ABIStr ==
"apcs-gnu")
7945 Kind = ARMABIInfo::APCS;
7946 else if (ABIStr ==
"aapcs16")
7947 Kind = ARMABIInfo::AAPCS16_VFP;
7948 else if (CodeGenOpts.
FloatABI ==
"hard" ||
7950 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
7951 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
7952 Triple.getEnvironment() == llvm::Triple::EABIHF)))
7953 Kind = ARMABIInfo::AAPCS_VFP;
7955 return SetCGInfo(
new ARMTargetCodeGenInfo(Types, Kind));
7958 case llvm::Triple::ppc:
7960 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.
FloatABI ==
"soft"));
7961 case llvm::Triple::ppc64:
7962 if (Triple.isOSBinFormatELF()) {
7963 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
7965 Kind = PPC64_SVR4_ABIInfo::ELFv2;
7968 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
7970 return SetCGInfo(
new PPC64TargetCodeGenInfo(Types));
7971 case llvm::Triple::ppc64le: {
7972 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
7973 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
7975 Kind = PPC64_SVR4_ABIInfo::ELFv1;
7978 return SetCGInfo(
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
7981 case llvm::Triple::nvptx:
7982 case llvm::Triple::nvptx64:
7983 return SetCGInfo(
new NVPTXTargetCodeGenInfo(Types));
7985 case llvm::Triple::msp430:
7986 return SetCGInfo(
new MSP430TargetCodeGenInfo(Types));
7988 case llvm::Triple::systemz: {
7990 return SetCGInfo(
new SystemZTargetCodeGenInfo(Types, HasVector));
7993 case llvm::Triple::tce:
7994 return SetCGInfo(
new TCETargetCodeGenInfo(Types));
7996 case llvm::Triple::x86: {
7997 bool IsDarwinVectorABI = Triple.isOSDarwin();
7998 bool RetSmallStructInRegABI =
7999 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
8000 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
8002 if (Triple.getOS() == llvm::Triple::Win32) {
8003 return SetCGInfo(
new WinX86_32TargetCodeGenInfo(
8004 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8005 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
8007 return SetCGInfo(
new X86_32TargetCodeGenInfo(
8008 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8009 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
8014 case llvm::Triple::x86_64: {
8018 ? X86AVXABILevel::AVX512
8021 switch (Triple.getOS()) {
8022 case llvm::Triple::Win32:
8023 return SetCGInfo(
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
8024 case llvm::Triple::PS4:
8025 return SetCGInfo(
new PS4TargetCodeGenInfo(Types, AVXLevel));
8027 return SetCGInfo(
new X86_64TargetCodeGenInfo(Types, AVXLevel));
8030 case llvm::Triple::hexagon:
8031 return SetCGInfo(
new HexagonTargetCodeGenInfo(Types));
8032 case llvm::Triple::lanai:
8033 return SetCGInfo(
new LanaiTargetCodeGenInfo(Types));
8034 case llvm::Triple::r600:
8035 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
8036 case llvm::Triple::amdgcn:
8037 return SetCGInfo(
new AMDGPUTargetCodeGenInfo(Types));
8038 case llvm::Triple::sparc:
8039 return SetCGInfo(
new SparcV8TargetCodeGenInfo(Types));
8040 case llvm::Triple::sparcv9:
8041 return SetCGInfo(
new SparcV9TargetCodeGenInfo(Types));
8042 case llvm::Triple::xcore:
8043 return SetCGInfo(
new XCoreTargetCodeGenInfo(Types));
8044 case llvm::Triple::spir:
8045 case llvm::Triple::spir64:
8046 return SetCGInfo(
new SPIRTargetCodeGenInfo(Types));
Ignore - Ignore the argument (treat as void).
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
void setEffectiveCallingConvention(unsigned Value)
External linkage, which indicates that the entity can be referred to from other translation units...
static ABIArgInfo getExtend(llvm::Type *T=nullptr)
static bool occupiesMoreThan(CodeGenTypes &cgt, ArrayRef< llvm::Type * > scalarTypes, unsigned maxAllRegisters)
Does the given lowering require more than the given number of registers when expanded?
PointerType - C99 6.7.5.1 - Pointer Declarators.
A (possibly-)qualified type.
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
llvm::Type * ConvertTypeForMem(QualType T)
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
bool isSpecificBuiltinType(unsigned K) const
Test for a particular builtin type.
CanQualType getReturnType() const
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
bool isBitField() const
Determines whether this field is a bitfield.
bool isMemberPointerType() const
unsigned getInAllocaFieldIndex() const
llvm::Module & getModule() const
llvm::LLVMContext & getLLVMContext()
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, bool forReturn)
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateTempAlloca - This creates a alloca and inserts it into the entry block.
Extend - Valid only for integer argument types.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
llvm::LoadInst * CreateDefaultAlignedLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Address getAddress() const
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
CodeGen::CGCXXABI & getCXXABI() const
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
CGCXXABI & getCXXABI() const
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
bool hasFlexibleArrayMember() const
bool isEnumeralType() const
ASTContext & getContext() const
const llvm::DataLayout & getDataLayout() const
The base class of the type hierarchy.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
bool isBlockPointerType() const
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
virtual ~TargetCodeGenInfo()
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
VarDecl - An instance of this class is created to represent a variable declaration or definition...
llvm::Type * getElementType() const
Return the type of the values stored in this address.
CallingConv getCallConv() const
field_iterator field_begin() const
void setCoerceToType(llvm::Type *T)
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
virtual bool shouldSignExtUnsignedType(QualType Ty) const
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
llvm::LLVMContext & getVMContext() const
RecordDecl - Represents a struct/union/class.
const_arg_iterator arg_end() const
CodeGen::CodeGenTypes & CGT
One of these records is kept for each identifier that is lexed.
bool isScalarType() const
class LLVM_ALIGNAS(8) DependentTemplateSpecializationType const IdentifierInfo * Name
Represents a template specialization type whose template cannot be resolved, e.g. ...
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
llvm::IntegerType * Int64Ty
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
bool isReferenceType() const
bool isStructureOrClassType() const
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
static ABIArgInfo getExtendInReg(llvm::Type *T=nullptr)
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
ABIArgInfo classifyReturnType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to return a particular type.
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
llvm::Type * getCoerceToType() const
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
QualType getReturnType() const
field_range fields() const
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
RecordDecl * getDecl() const
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
CharUnits getPointerSize() const
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
virtual StringRef getABI() const
Get the ABI currently in use.
detail::InMemoryDirectory::const_iterator I
llvm::StructType * getCoerceAndExpandType() const
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
std::string FloatABI
The ABI to use for passing floating point arguments.
field_iterator field_end() const
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
EnumDecl * getDecl() const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
unsigned getNumRequiredArgs() const
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource AlignSource=AlignmentSource::Type)
static CharUnits One()
One - Construct a CharUnits quantity of one.
const llvm::DataLayout & getDataLayout() const
Represents a prototype with parameter type info, e.g.
bool isFloatingPoint() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
const TargetInfo & getTarget() const
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
bool isRealFloatingType() const
Floating point categories.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Exposes information about the current target.
llvm::Value * getPointer() const
StringRef getName() const
Return the actual identifier string.
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
bool isAnyComplexType() const
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
CharUnits getIndirectAlign() const
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, unsigned elts) const
ASTContext & getContext() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isFloatingType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
char __ovld __cnfn min(char x, char y)
Returns y if y < x, otherwise it returns x.
llvm::LLVMContext & getLLVMContext()
llvm::IntegerType * Int32Ty
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
Represents a GCC generic vector type.
Implements C++ ABI-specific semantic analysis functions.
unsigned getRegParm() const
The result type of a method or function.
llvm::Type * getPaddingType() const
RecordDecl * getDefinition() const
getDefinition - Returns the RecordDecl that actually defines this struct/union/class.
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
The l-value was considered opaque, so the alignment was determined from a type.
Pass it as a pointer to temporary memory.
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff the field is "empty", that is it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
ASTContext & getContext() const
CharUnits getPointerAlign() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
bool isBuiltinType() const
Helper methods to distinguish type categories.
const llvm::DataLayout & getDataLayout() const
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
CoerceAndExpand - Only valid for aggregate argument types.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
const CodeGenOptions & getCodeGenOpts() const
const LangOptions & getLangOpts() const
llvm::LLVMContext & getLLVMContext()
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isVectorType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
bool isMemberFunctionPointerType() const
TargetCodeGenInfo - This class organizes various target-specific code generation issues, like target-specific attributes, builtins and so on.
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
QualType getPointeeType() const
bool isSRetAfterThis() const
CGFunctionInfo - Class to encapsulate the information about a function definition.
CharUnits getAlignment() const
Return the alignment of this pointer.
This class organizes the cross-function state that is used while generating LLVM code.
bool canHaveCoerceToType() const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the g...
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
A refining implementation of ABIInfo for targets that support swiftcall.
bool isZero() const
isZero - Test whether the quantity equals zero.
unsigned getDirectOffset() const
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateMemTemp(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
EnumDecl - Represents an enum.
detail::InMemoryDirectory::const_iterator E
for(auto typeArg:T->getTypeArgsAsWritten())
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
const llvm::Triple & getTriple() const
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned Map[Count]
The type of a lookup table which maps from language-specific address spaces to target-specific ones...
const RecordType * getAsStructureType() const
llvm::PointerType * getType() const
Return the type of the pointer value.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
const T * getAs() const
Member-template getAs<specific type>'.
QualType getCanonicalType() const
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LanguageLinkage getLanguageLinkage() const
Compute the language linkage.
Implements C++ ABI-specific code generation functions.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
const TargetInfo & getTarget() const
Expand - Only valid for aggregate argument types.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Represents a base class of a C++ class.
char __ovld __cnfn max(char x, char y)
Returns y if x < y, otherwise it returns x.
Pass it on the stack using its defined layout.
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
BoundNodesTreeBuilder *const Builder
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
llvm::Type * ConvertType(QualType T)
bool getHasRegParm() const
ArraySizeModifier getSizeModifier() const
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorType::VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
This class is used for builtin types like 'int'.
const TargetInfo & getTarget() const
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
const_arg_iterator arg_begin() const
bool getIndirectByVal() const
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
QualType getElementType() const
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
bool getIndirectRealign() const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
LValue - This represents an lvalue reference.
ASTContext & getContext() const
void setInAllocaSRet(bool SRet)
EnumDecl * getDefinition() const
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
bool isConstQualified() const
Determine whether this type is const-qualified.
RecordArgABI
Specify how one should pass an argument of a record type.
bool isNull() const
Return true if this QualType doesn't point to a type yet.
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CallArgList - Type for representing both the value and type of arguments in a call.
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
Attr - This represents one attribute.
bool supportsCOMDAT() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
bool isPointerType() const