104#include "llvm/IR/IntrinsicsAMDGPU.h"
114#define DEBUG_TYPE "amdgpu-sw-lower-lds"
115#define COV5_HIDDEN_DYN_LDS_SIZE_ARG 15
123 AsanInstrumentLDS(
"amdgpu-asan-instrument-lds",
124 cl::desc(
"Run asan instrumentation on LDS instructions "
125 "lowered to global memory"),
130struct LDSAccessTypeInfo {
138struct KernelLDSParameters {
142 LDSAccessTypeInfo DirectAccess;
143 LDSAccessTypeInfo IndirectAccess;
145 LDSToReplacementIndicesMap;
153struct NonKernelLDSParameters {
160struct AsanInstrumentInfo {
166struct FunctionsAndLDSAccess {
174class AMDGPUSwLowerLDS {
177 DomTreeCallback Callback)
178 : M(
Mod), AMDGPUTM(TM), IRB(M.getContext()), DTCallback(Callback) {}
180 void getUsesOfLDSByNonKernels();
181 void getNonKernelsWithLDSArguments(
const CallGraph &CG);
186 void buildSwLDSGlobal(
Function *Func);
187 void buildSwDynLDSGlobal(
Function *Func);
188 void populateSwMetadataGlobal(
Function *Func);
189 void populateSwLDSAttributeAndMetadata(
Function *Func);
190 void populateLDSToReplacementIndicesMap(
Function *Func);
191 void getLDSMemoryInstructions(
Function *Func,
193 void replaceKernelLDSAccesses(
Function *Func);
194 Value *getTranslatedGlobalMemoryPtrOfLDS(
Value *LoadMallocPtr,
Value *LDSPtr);
195 void translateLDSMemoryOperationsToGlobalMemory(
200 void buildNonKernelLDSOffsetTable(NonKernelLDSParameters &NKLDSParams);
201 void buildNonKernelLDSBaseTable(NonKernelLDSParameters &NKLDSParams);
203 getAddressesOfVariablesInKernel(
Function *Func,
205 void lowerNonKernelLDSAccesses(
Function *Func,
207 NonKernelLDSParameters &NKLDSParams);
209 updateMallocSizeForDynamicLDS(
Function *Func,
Value **CurrMallocSize,
210 Value *HiddenDynLDSSize,
218 DomTreeCallback DTCallback;
219 FunctionsAndLDSAccess FuncLDSAccessInfo;
220 AsanInstrumentInfo AsanInfo;
223template <
typename T>
SetVector<T> sortByName(std::vector<T> &&V) {
226 sort(V, [](
const auto *L,
const auto *R) {
227 return L->getName() < R->getName();
236 std::vector<GlobalVariable *>(Variables.
begin(), Variables.
end()));
244 if (Kernels.size() > UINT32_MAX) {
248 sortByName(std::vector<Function *>(Kernels.begin(), Kernels.end()));
249 for (
size_t i = 0; i < Kernels.size(); i++) {
254 Func->setMetadata(
"llvm.amdgcn.lds.kernel.id",
257 return OrderedKernels;
260void AMDGPUSwLowerLDS::getNonKernelsWithLDSArguments(
const CallGraph &CG) {
264 for (
auto &K : FuncLDSAccessInfo.KernelToLDSParametersMap) {
269 for (
auto &
I : *CGN) {
278 Type *ArgTy = (*AI).getType();
283 FuncLDSAccessInfo.NonKernelsWithLDSArgument.
insert(CalledFunc);
286 FuncLDSAccessInfo.KernelsWithIndirectLDSAccess.
insert(Func);
292void AMDGPUSwLowerLDS::getUsesOfLDSByNonKernels() {
293 for (
GlobalVariable *GV : FuncLDSAccessInfo.AllNonKernelLDSAccess) {
301 FuncLDSAccessInfo.NonKernelToLDSAccessMap[
F].insert(GV);
315 ConstantInt::get(IntTy, Address + 1));
316 GV->
setMetadata(LLVMContext::MD_absolute_symbol, MetadataNode);
327 Func->addFnAttr(
"amdgpu-lds-size", Buffer);
333 IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
336 Intrinsic::donothing, {});
338 Value *UseInstance[1] = {
339 Builder.CreateConstInBoundsGEP1_32(SGV->
getValueType(), SGV, 0)};
341 Builder.CreateCall(Decl, {},
345void AMDGPUSwLowerLDS::buildSwLDSGlobal(
Function *Func) {
348 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
356 LDSParams.SwLDS->setSanitizerMetadata(MD);
359void AMDGPUSwLowerLDS::buildSwDynLDSGlobal(
Function *Func) {
361 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
362 if (LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
363 LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
369 "llvm.amdgcn." + Func->getName() +
".dynlds",
nullptr,
371 markUsedByKernel(Func, LDSParams.SwDynLDS);
374 LDSParams.SwDynLDS->setSanitizerMetadata(MD);
377void AMDGPUSwLowerLDS::populateSwLDSAttributeAndMetadata(
Function *Func) {
378 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
379 bool IsDynLDSUsed = LDSParams.SwDynLDS;
381 recordLDSAbsoluteAddress(M, LDSParams.SwLDS, 0);
382 addLDSSizeAttribute(Func,
Offset, IsDynLDSUsed);
383 if (LDSParams.SwDynLDS)
384 recordLDSAbsoluteAddress(M, LDSParams.SwDynLDS,
Offset);
387void AMDGPUSwLowerLDS::populateSwMetadataGlobal(
Function *Func) {
390 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
391 auto &Ctx = M.getContext();
392 auto &
DL = M.getDataLayout();
393 std::vector<Type *> Items;
395 std::vector<Constant *> Initializers;
396 Align MaxAlignment(1);
399 MaxAlignment = std::max(MaxAlignment, GVAlign);
402 for (
GlobalVariable *GV : LDSParams.DirectAccess.StaticLDSGlobals)
403 UpdateMaxAlignment(GV);
405 for (
GlobalVariable *GV : LDSParams.DirectAccess.DynamicLDSGlobals)
406 UpdateMaxAlignment(GV);
408 for (
GlobalVariable *GV : LDSParams.IndirectAccess.StaticLDSGlobals)
409 UpdateMaxAlignment(GV);
411 for (
GlobalVariable *GV : LDSParams.IndirectAccess.DynamicLDSGlobals)
412 UpdateMaxAlignment(GV);
417 MDItemOS <<
"llvm.amdgcn.sw.lds." << Func->getName() <<
".md.item";
421 uint32_t &MallocSize = LDSParams.MallocSize;
423 int AsanScale = AsanInfo.Scale;
424 auto buildInitializerForSwLDSMD =
426 for (
auto &GV : LDSGlobals) {
429 UniqueLDSGlobals.
insert(GV);
432 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
433 Items.push_back(LDSItemTy);
440 MallocSize += SizeInBytes;
442 LDSParams.RedzoneOffsetAndSizeVector.emplace_back(MallocSize,
444 MallocSize += RightRedzoneSize;
447 alignTo(SizeInBytes + RightRedzoneSize, MaxAlignment);
449 ConstantInt::get(
Int32Ty, AlignedSize);
451 MallocSize =
alignTo(MallocSize, MaxAlignment);
454 AlignedSizeInBytesConst});
455 Initializers.push_back(InitItem);
459 SwLDSVector.
insert(LDSParams.SwLDS);
460 buildInitializerForSwLDSMD(SwLDSVector);
461 buildInitializerForSwLDSMD(LDSParams.DirectAccess.StaticLDSGlobals);
462 buildInitializerForSwLDSMD(LDSParams.IndirectAccess.StaticLDSGlobals);
463 buildInitializerForSwLDSMD(LDSParams.DirectAccess.DynamicLDSGlobals);
464 buildInitializerForSwLDSMD(LDSParams.IndirectAccess.DynamicLDSGlobals);
467 Type *Ty = LDSParams.SwLDS->getValueType();
468 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
470 LDSParams.LDSSize = AlignedSize;
473 MDTypeOS <<
"llvm.amdgcn.sw.lds." << Func->getName() <<
".md.type";
478 MDOS <<
"llvm.amdgcn.sw.lds." << Func->getName() <<
".md";
484 LDSParams.SwLDSMetadata->setInitializer(
data);
487 LDSParams.SwLDS->setAlignment(MaxAlignment);
488 if (LDSParams.SwDynLDS)
489 LDSParams.SwDynLDS->setAlignment(MaxAlignment);
492 LDSParams.SwLDSMetadata->setSanitizerMetadata(MD);
495void AMDGPUSwLowerLDS::populateLDSToReplacementIndicesMap(
Function *Func) {
498 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
502 for (
auto &GV : LDSGlobals) {
505 UniqueLDSGlobals.
insert(GV);
506 LDSParams.LDSToReplacementIndicesMap[GV] = {0, Idx, 0};
512 SwLDSVector.
insert(LDSParams.SwLDS);
513 PopulateIndices(SwLDSVector, Idx);
514 PopulateIndices(LDSParams.DirectAccess.StaticLDSGlobals, Idx);
515 PopulateIndices(LDSParams.IndirectAccess.StaticLDSGlobals, Idx);
516 PopulateIndices(LDSParams.DirectAccess.DynamicLDSGlobals, Idx);
517 PopulateIndices(LDSParams.IndirectAccess.DynamicLDSGlobals, Idx);
521 Value *Replacement) {
523 auto ReplaceUsesLambda = [Func](
const Use &U) ->
bool {
524 auto *V = U.getUser();
526 auto *Func1 = Inst->getFunction();
535void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(
Function *Func) {
536 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
544 auto &IndirectAccess = LDSParams.IndirectAccess;
545 auto &DirectAccess = LDSParams.DirectAccess;
549 for (
auto &GV : LDSGlobals) {
552 if ((IndirectAccess.StaticLDSGlobals.contains(GV) ||
553 IndirectAccess.DynamicLDSGlobals.contains(GV)) &&
554 (!DirectAccess.StaticLDSGlobals.contains(GV) &&
555 !DirectAccess.DynamicLDSGlobals.contains(GV)))
559 UniqueLDSGlobals.
insert(GV);
560 auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
561 assert(Indices.size() == 3);
563 ConstantInt::get(
Int32Ty, Indices[1]),
564 ConstantInt::get(
Int32Ty, Indices[2])};
566 SwLDSMetadataStructType, SwLDSMetadata, GEPIdx,
true);
568 Value *BasePlusOffset =
572 replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
575 ReplaceLDSGlobalUses(DirectAccess.StaticLDSGlobals);
576 ReplaceLDSGlobalUses(IndirectAccess.StaticLDSGlobals);
577 ReplaceLDSGlobalUses(DirectAccess.DynamicLDSGlobals);
578 ReplaceLDSGlobalUses(IndirectAccess.DynamicLDSGlobals);
581void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
584 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
589 assert(SwLDS && SwLDSMetadata);
594 Value *MaxAlignValueMinusOne = IRB.
getInt32(MaxAlignment - 1);
597 auto &Indices = LDSParams.LDSToReplacementIndicesMap[DynGV];
604 MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2Offset});
610 {Index0, Index1, Index2Size});
616 MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2AlignedSize});
618 Value *AlignedDynLDSSize =
619 IRB.
CreateAdd(CurrDynLDSSize, MaxAlignValueMinusOne);
620 AlignedDynLDSSize = IRB.
CreateUDiv(AlignedDynLDSSize, MaxAlignValue);
621 AlignedDynLDSSize = IRB.
CreateMul(AlignedDynLDSSize, MaxAlignValue);
622 IRB.
CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
625 *CurrMallocSize = IRB.
CreateAdd(*CurrMallocSize, AlignedDynLDSSize);
639void AMDGPUSwLowerLDS::getLDSMemoryInstructions(
645 LDSInstructions.
insert(&Inst);
648 LDSInstructions.
insert(&Inst);
651 LDSInstructions.
insert(&Inst);
654 LDSInstructions.
insert(&Inst);
658 LDSInstructions.
insert(&Inst);
665Value *AMDGPUSwLowerLDS::getTranslatedGlobalMemoryPtrOfLDS(
Value *LoadMallocPtr,
667 assert(LDSPtr &&
"Invalid LDS pointer operand");
681void AMDGPUSwLowerLDS::translateLDSMemoryOperationsToGlobalMemory(
684 LLVM_DEBUG(
dbgs() <<
"Translating LDS memory operations to global memory : "
689 Value *LIOperand = LI->getPointerOperand();
691 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, LIOperand);
693 LI->getAlign(), LI->isVolatile());
694 NewLI->
setAtomic(LI->getOrdering(), LI->getSyncScopeID());
695 AsanInfo.Instructions.
insert(NewLI);
696 LI->replaceAllUsesWith(NewLI);
697 LI->eraseFromParent();
699 Value *SIOperand =
SI->getPointerOperand();
701 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, SIOperand);
703 SI->getValueOperand(), Replacement,
SI->getAlign(),
SI->isVolatile());
705 AsanInfo.Instructions.
insert(NewSI);
706 SI->replaceAllUsesWith(NewSI);
707 SI->eraseFromParent();
709 Value *RMWPtrOperand = RMW->getPointerOperand();
710 Value *RMWValOperand = RMW->getValOperand();
712 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, RMWPtrOperand);
714 RMW->getOperation(), Replacement, RMWValOperand, RMW->getAlign(),
715 RMW->getOrdering(), RMW->getSyncScopeID());
717 AsanInfo.Instructions.
insert(NewRMW);
718 RMW->replaceAllUsesWith(NewRMW);
719 RMW->eraseFromParent();
721 Value *XCHGPtrOperand = XCHG->getPointerOperand();
723 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, XCHGPtrOperand);
725 Replacement, XCHG->getCompareOperand(), XCHG->getNewValOperand(),
726 XCHG->getAlign(), XCHG->getSuccessOrdering(),
727 XCHG->getFailureOrdering(), XCHG->getSyncScopeID());
729 AsanInfo.Instructions.
insert(NewXCHG);
730 XCHG->replaceAllUsesWith(NewXCHG);
731 XCHG->eraseFromParent();
733 Value *AIOperand = ASC->getPointerOperand();
735 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, AIOperand);
741 ASC->eraseFromParent();
747void AMDGPUSwLowerLDS::poisonRedzones(
Function *Func,
Value *MallocPtr) {
748 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
752 "__asan_poison_region",
755 auto RedzonesVec = LDSParams.RedzoneOffsetAndSizeVector;
756 size_t VecSize = RedzonesVec.size();
757 for (
unsigned i = 0; i < VecSize; i++) {
758 auto &RedzonePair = RedzonesVec[i];
759 uint64_t RedzoneOffset = RedzonePair.first;
760 uint64_t RedzoneSize = RedzonePair.second;
762 IRB.
getInt8Ty(), MallocPtr, {IRB.getInt64(RedzoneOffset)});
765 {RedzoneAddress, IRB.
getInt64(RedzoneSize)});
769void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(
Function *Func,
771 LLVM_DEBUG(
dbgs() <<
"Sw Lowering Kernel LDS for : " << Func->getName());
772 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
773 auto &Ctx = M.getContext();
774 auto *PrevEntryBlock = &Func->getEntryBlock();
776 getLDSMemoryInstructions(Func, LDSInstructions);
790 auto SplitIt = PrevEntryBlock->getFirstNonPHIOrDbgOrAlloca();
791 WIdBlock->splice(WIdBlock->end(), PrevEntryBlock, PrevEntryBlock->begin(),
796 AI->moveBefore(*WIdBlock, WIdBlock->end());
811 IRB.
CreateCondBr(WIdzCond, MallocBlock, PrevEntryBlock);
821 assert(SwLDS && SwLDSMetadata);
825 Value *CurrMallocSize;
831 for (
auto &GV : LDSGlobals) {
834 UniqueLDSGlobals.
insert(GV);
838 GetUniqueLDSGlobals(LDSParams.DirectAccess.StaticLDSGlobals);
839 GetUniqueLDSGlobals(LDSParams.IndirectAccess.StaticLDSGlobals);
840 unsigned NumStaticLDS = 1 + UniqueLDSGlobals.
size();
841 UniqueLDSGlobals.
clear();
844 auto *GEPForEndStaticLDSOffset =
847 ConstantInt::get(
Int32Ty, NumStaticLDS - 1),
848 ConstantInt::get(
Int32Ty, 0)});
850 auto *GEPForEndStaticLDSSize =
853 ConstantInt::get(
Int32Ty, NumStaticLDS - 1),
854 ConstantInt::get(
Int32Ty, 2)});
856 Value *EndStaticLDSOffset =
859 CurrMallocSize = IRB.
CreateAdd(EndStaticLDSOffset, EndStaticLDSSize);
861 CurrMallocSize = IRB.
getInt32(MallocSize);
863 if (LDSParams.SwDynLDS) {
866 "Dynamic LDS size query is only supported for CO V5 and later.");
872 {ConstantInt::get(Int64Ty, COV5_HIDDEN_DYN_LDS_SIZE_ARG)});
873 UniqueLDSGlobals.
clear();
874 GetUniqueLDSGlobals(LDSParams.DirectAccess.DynamicLDSGlobals);
875 GetUniqueLDSGlobals(LDSParams.IndirectAccess.DynamicLDSGlobals);
876 updateMallocSizeForDynamicLDS(Func, &CurrMallocSize, HiddenDynLDSSize,
880 CurrMallocSize = IRB.
CreateZExt(CurrMallocSize, Int64Ty);
885 Intrinsic::returnaddress, IRB.
getPtrTy(
DL.getProgramAddressSpace()),
891 Value *MallocCall = IRB.
CreateCall(MallocFunc, {CurrMallocSize, RAPtrToInt});
900 poisonRedzones(Func, MallocPtr);
908 auto *XYZCondPhi = IRB.
CreatePHI(Int1Ty, 2,
"xyzCond");
910 XYZCondPhi->addIncoming(IRB.
getInt1(1), MallocBlock);
915 Value *LoadMallocPtr =
919 replaceKernelLDSAccesses(Func);
923 translateLDSMemoryOperationsToGlobalMemory(Func, LoadMallocPtr,
932 RI->eraseFromParent();
952 Intrinsic::returnaddress, IRB.
getPtrTy(
DL.getProgramAddressSpace()),
956 IRB.
CreateCall(AsanFreeFunc, {MallocPtrToInt, RAPToInt});
970Constant *AMDGPUSwLowerLDS::getAddressesOfVariablesInKernel(
973 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
977 auto *SwLDSMetadataStructType =
983 for (
auto *GV : Variables) {
984 auto It = LDSParams.LDSToReplacementIndicesMap.find(GV);
985 if (It == LDSParams.LDSToReplacementIndicesMap.end()) {
990 auto &Indices = It->second;
992 ConstantInt::get(
Int32Ty, Indices[1]),
993 ConstantInt::get(
Int32Ty, Indices[2])};
995 SwLDSMetadata, GEPIdx,
true);
996 Elements.push_back(
GEP);
1001void AMDGPUSwLowerLDS::buildNonKernelLDSBaseTable(
1002 NonKernelLDSParameters &NKLDSParams) {
1006 auto &Kernels = NKLDSParams.OrderedKernels;
1007 if (Kernels.empty())
1009 const size_t NumberKernels = Kernels.size();
1012 std::vector<Constant *> OverallConstantExprElts(NumberKernels);
1013 for (
size_t i = 0; i < NumberKernels; i++) {
1015 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
1016 OverallConstantExprElts[i] = LDSParams.SwLDS;
1029void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
1030 NonKernelLDSParameters &NKLDSParams) {
1038 auto &Variables = NKLDSParams.OrdereLDSGlobals;
1039 auto &Kernels = NKLDSParams.OrderedKernels;
1040 if (Variables.
empty() || Kernels.empty())
1042 const size_t NumberVariables = Variables.
size();
1043 const size_t NumberKernels = Kernels.size();
1050 std::vector<Constant *> overallConstantExprElts(NumberKernels);
1051 for (
size_t i = 0; i < NumberKernels; i++) {
1053 overallConstantExprElts[i] =
1054 getAddressesOfVariablesInKernel(Func, Variables);
1067void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
1069 NonKernelLDSParameters &NKLDSParams) {
1072 LLVM_DEBUG(
dbgs() <<
"Sw LDS lowering, lower non-kernel access for : "
1073 << Func->getName());
1074 auto InsertAt = Func->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
1079 getLDSMemoryInstructions(Func, LDSInstructions);
1081 auto *KernelId = IRB.
CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {});
1084 auto &OrdereLDSGlobals = NKLDSParams.OrdereLDSGlobals;
1086 LDSBaseTable->
getValueType(), LDSBaseTable, {IRB.getInt32(0), KernelId});
1089 Value *LoadMallocPtr =
1093 const auto *GVIt =
llvm::find(OrdereLDSGlobals, GV);
1094 assert(GVIt != OrdereLDSGlobals.end());
1095 uint32_t GVOffset = std::distance(OrdereLDSGlobals.begin(), GVIt);
1099 {IRB.getInt32(0), KernelId, IRB.getInt32(GVOffset)});
1103 Value *BasePlusOffset =
1105 LLVM_DEBUG(
dbgs() <<
"Sw LDS Lowering, Replace non-kernel LDS for "
1107 replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
1109 translateLDSMemoryOperationsToGlobalMemory(Func, LoadMallocPtr,
1113static void reorderStaticDynamicIndirectLDSSet(KernelLDSParameters &LDSParams) {
1116 auto &DirectAccess = LDSParams.DirectAccess;
1117 auto &IndirectAccess = LDSParams.IndirectAccess;
1118 LDSParams.DirectAccess.StaticLDSGlobals = sortByName(
1119 std::vector<GlobalVariable *>(DirectAccess.StaticLDSGlobals.begin(),
1120 DirectAccess.StaticLDSGlobals.end()));
1121 LDSParams.DirectAccess.DynamicLDSGlobals = sortByName(
1122 std::vector<GlobalVariable *>(DirectAccess.DynamicLDSGlobals.begin(),
1123 DirectAccess.DynamicLDSGlobals.end()));
1124 LDSParams.IndirectAccess.StaticLDSGlobals = sortByName(
1125 std::vector<GlobalVariable *>(IndirectAccess.StaticLDSGlobals.begin(),
1126 IndirectAccess.StaticLDSGlobals.end()));
1127 LDSParams.IndirectAccess.DynamicLDSGlobals = sortByName(
1128 std::vector<GlobalVariable *>(IndirectAccess.DynamicLDSGlobals.begin(),
1129 IndirectAccess.DynamicLDSGlobals.end()));
1132void AMDGPUSwLowerLDS::initAsanInfo() {
1138 bool OrShadowOffset;
1140 &
Offset, &Scale, &OrShadowOffset);
1141 AsanInfo.Scale = Scale;
1142 AsanInfo.Offset =
Offset;
1146 for (
auto &K : LDSAccesses) {
1150 if (
F->hasFnAttribute(Attribute::SanitizeAddress))
1156bool AMDGPUSwLowerLDS::run() {
1168 bool LowerAllLDS = hasFnWithSanitizeAddressAttr(LDSUsesInfo.
direct_access) ||
1176 bool DirectAccess) {
1177 for (
auto &K : LDSAccesses) {
1179 if (!
F || K.second.empty())
1185 FuncLDSAccessInfo.KernelToLDSParametersMap.insert(
1186 {
F, KernelLDSParameters()});
1188 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[
F];
1190 FuncLDSAccessInfo.KernelsWithIndirectLDSAccess.
insert(
F);
1192 if (!DirectAccess) {
1194 LDSParams.IndirectAccess.DynamicLDSGlobals.insert(GV);
1196 LDSParams.IndirectAccess.StaticLDSGlobals.insert(GV);
1197 FuncLDSAccessInfo.AllNonKernelLDSAccess.insert(GV);
1200 LDSParams.DirectAccess.DynamicLDSGlobals.insert(GV);
1202 LDSParams.DirectAccess.StaticLDSGlobals.insert(GV);
1208 PopulateKernelStaticDynamicLDS(LDSUsesInfo.
direct_access,
true);
1214 for (
auto &K : FuncLDSAccessInfo.KernelToLDSParametersMap) {
1216 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
1217 if (LDSParams.DirectAccess.StaticLDSGlobals.empty() &&
1218 LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
1219 LDSParams.IndirectAccess.StaticLDSGlobals.empty() &&
1220 LDSParams.IndirectAccess.DynamicLDSGlobals.empty()) {
1225 {
"amdgpu-no-workitem-id-x",
"amdgpu-no-workitem-id-y",
1226 "amdgpu-no-workitem-id-z",
"amdgpu-no-heap-ptr"});
1227 if (!LDSParams.IndirectAccess.StaticLDSGlobals.empty() ||
1228 !LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
1230 reorderStaticDynamicIndirectLDSSet(LDSParams);
1231 buildSwLDSGlobal(Func);
1232 buildSwDynLDSGlobal(Func);
1233 populateSwMetadataGlobal(Func);
1234 populateSwLDSAttributeAndMetadata(Func);
1235 populateLDSToReplacementIndicesMap(Func);
1237 DomTreeUpdater::UpdateStrategy::Lazy);
1238 lowerKernelLDSAccesses(Func, DTU);
1244 getUsesOfLDSByNonKernels();
1247 getNonKernelsWithLDSArguments(CG);
1250 if (!FuncLDSAccessInfo.NonKernelToLDSAccessMap.empty() ||
1251 !FuncLDSAccessInfo.NonKernelsWithLDSArgument.
empty()) {
1252 NonKernelLDSParameters NKLDSParams;
1253 NKLDSParams.OrderedKernels = getOrderedIndirectLDSAccessingKernels(
1254 FuncLDSAccessInfo.KernelsWithIndirectLDSAccess);
1255 NKLDSParams.OrdereLDSGlobals = getOrderedNonKernelAllLDSGlobals(
1256 FuncLDSAccessInfo.AllNonKernelLDSAccess);
1257 buildNonKernelLDSBaseTable(NKLDSParams);
1258 buildNonKernelLDSOffsetTable(NKLDSParams);
1259 for (
auto &K : FuncLDSAccessInfo.NonKernelToLDSAccessMap) {
1263 std::vector<GlobalVariable *>(LDSGlobals.
begin(), LDSGlobals.
end()));
1264 lowerNonKernelLDSAccesses(Func, OrderedLDSGlobals, NKLDSParams);
1266 for (
Function *Func : FuncLDSAccessInfo.NonKernelsWithLDSArgument) {
1267 auto &K = FuncLDSAccessInfo.NonKernelToLDSAccessMap;
1268 if (K.contains(Func))
1271 lowerNonKernelLDSAccesses(Func, Vec, NKLDSParams);
1288 if (AsanInstrumentLDS) {
1295 for (
auto &Operand : OperandsToInstrument) {
1296 Value *Addr = Operand.getPtr();
1298 Operand.Alignment.valueOrOne(), Operand.TypeStoreSize,
1299 Operand.IsWrite,
nullptr,
false,
false, AsanInfo.Scale,
1308class AMDGPUSwLowerLDSLegacy :
public ModulePass {
1314 bool runOnModule(
Module &M)
override;
1321char AMDGPUSwLowerLDSLegacy::ID = 0;
1325 "AMDGPU Software lowering of LDS",
false,
false)
1330bool AMDGPUSwLowerLDSLegacy::runOnModule(
Module &M) {
1333 if (!M.getModuleFlag(
"nosanitize_address"))
1336 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1338 return DTW ? &DTW->getDomTree() : nullptr;
1341 auto &TPC = getAnalysis<TargetPassConfig>();
1344 AMDGPUSwLowerLDS SwLowerLDSImpl(M, *AMDGPUTM, DTCallback);
1345 bool IsChanged = SwLowerLDSImpl.run();
1351 return new AMDGPUSwLowerLDSLegacy(TM);
1358 if (!M.getModuleFlag(
"nosanitize_address"))
1364 AMDGPUSwLowerLDS SwLowerLDSImpl(M,
TM, DTCallback);
1365 bool IsChanged = SwLowerLDSImpl.run();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
The AMDGPU TargetMachine interface definition for hw codegen targets.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
This file implements a set that has insertion order iteration characteristics.
Target-Independent Code Generator Pass Configuration Options pass.
static DebugLoc getOrCreateDebugLoc(const Instruction *InsertBefore, DISubprogram *SP)
This class represents a conversion between pointers from one address space to another.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
an instruction that atomically reads a memory location, combines it with another value,...
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
LLVM Basic Block Representation.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
A node in the call graph for a module.
Function * getFunction() const
Returns the function that this call graph node represents.
The basic data container for the call graph of a Module of IR.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
LLVM_ABI void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Subprogram description. Uses SubclassData1.
A parsed version of the target data layout string in and methods for querying it.
Implements a dense probed hash-table based set.
Analysis pass which computes a DominatorTree.
static constexpr UpdateKind Insert
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
LLVM_ABI void setSanitizerMetadata(SanitizerMetadata Meta)
@ InternalLinkage
Rename collisions when linking (static functions).
@ ExternalLinkage
Externally visible function.
Type * getValueType() const
uint64_t getAlignment() const
FIXME: Remove this function once transition to Align is over.
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
AtomicCmpXchgInst * CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID=SyncScope::System)
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
UncondBrInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > OverloadTypes, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using OverloadTypes.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
iterator end()
Get an iterator to the end of the SetVector.
void clear()
Completely clear the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Class to represent struct types.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
const Triple & getTargetTriple() const
Target-Independent Code Generator Pass Configuration Options.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldReplace returns "true" for the given use.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
An efficient, type-erasing, non-owning reference to a callable.
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
StringRef str() const
Return a StringRef for the vector contents.
@ LOCAL_ADDRESS
Address space for local memory.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
bool isDynamicLDS(const GlobalVariable &GV)
unsigned getAMDHSACodeObjectVersion(const Module &M)
void removeFnAttrFromReachable(CallGraph &CG, Function *KernelRoot, ArrayRef< StringRef > FnAttrs)
Strip FnAttr attribute from any functions where we may have introduced its use.
LLVM_READNONE constexpr bool isKernel(CallingConv::ID CC)
LDSUsesInfoTy getTransitiveUsesOfLDS(const CallGraph &CG, Module &M)
DenseMap< Function *, DenseSet< GlobalVariable * > > FunctionVariableMap
bool isLDSVariableToLower(const GlobalVariable &GV)
bool eliminateConstantExprUsesOfLDSFromAllInstructions(Module &M)
Align getAlign(const DataLayout &DL, const GlobalVariable *GV)
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
char & AMDGPUSwLowerLDSLegacyPassID
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
const AMDGPUTargetMachine & TM
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
FunctionVariableMap direct_access
FunctionVariableMap indirect_access
This struct is a compact representation of a valid (non-zero power of two) alignment.