LLVM 23.0.0git
AMDGPUSwLowerLDS.cpp
Go to the documentation of this file.
1//===-- AMDGPUSwLowerLDS.cpp -----------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass lowers the local data store, LDS, uses in kernel and non-kernel
10// functions in module to use dynamically allocated global memory.
11// Packed LDS Layout is emulated in the global memory.
12// The lowered memory instructions from LDS to global memory are then
13// instrumented for address sanitizer, to catch addressing errors.
14// This pass only works when address sanitizer has been enabled and has
15// instrumented the IR. It identifies that IR has been instrumented using
16// "nosanitize_address" module flag.
17//
18// Replacement of Kernel LDS accesses:
19// For a kernel, LDS access can be static or dynamic which are direct
20// (accessed within kernel) and indirect (accessed through non-kernels).
21// All these LDS accesses corresponding to kernel will be packed together,
22// where all static LDS accesses will be allocated first and then dynamic
23// LDS follows. The total size with alignment is calculated. A new LDS global
24// will be created for the kernel called "SW LDS" and it will have the
25// attribute "amdgpu-lds-size" attached with value of the size calculated.
26// All the LDS accesses in the module will be replaced by GEP with offset
27// into the "Sw LDS".
28// A new "llvm.amdgcn.<kernel>.dynlds" is created per kernel accessing
29// the dynamic LDS. This will be marked used by kernel and will have
30// MD_absolute_symbol metadata set to total static LDS size, since dynamic
31// LDS allocation starts after all static LDS allocation.
32//
33// A device global memory equal to the total LDS size will be allocated.
34// At the prologue of the kernel, a single work-item from the
35// work-group, does a "malloc" and stores the pointer of the
36// allocation in "SW LDS".
37//
38// To store the offsets corresponding to all LDS accesses, another global
39// variable is created which will be called "SW LDS metadata" in this pass.
40// - SW LDS Global:
41// It is LDS global of ptr type with name
42// "llvm.amdgcn.sw.lds.<kernel-name>".
43// - Metadata Global:
44// It is of struct type, with n members. n equals the number of LDS
45// globals accessed by the kernel(direct and indirect). Each member of
46// struct is another struct of type {i32, i32, i32}. First member
47// corresponds to offset, second member corresponds to size of LDS global
48// being replaced and third represents the total aligned size. It will
49// have name "llvm.amdgcn.sw.lds.<kernel-name>.md". This global will have
50// an initializer with static LDS related offsets and sizes initialized.
51// But for dynamic LDS related entries, offsets will be initialized to
52// previous static LDS allocation end offset. Sizes for them will be zero
53// initially. These dynamic LDS offset and size values will be updated
54// within the kernel, since kernel can read the dynamic LDS size
55// allocation done at runtime with query to "hidden_dynamic_lds_size"
56// hidden kernel argument.
57//
58// At the epilogue of kernel, allocated memory would be made free by the same
59// single work-item.
60//
61// Replacement of non-kernel LDS accesses:
62// Multiple kernels can access the same non-kernel function.
63// All the kernels accessing LDS through non-kernels are sorted and
64// assigned a kernel-id. All the LDS globals accessed by non-kernels
65// are sorted. This information is used to build two tables:
66// - Base table:
67// Base table will have single row, with elements of the row
68// placed as per kernel ID. Each element in the row corresponds
69// to ptr of "SW LDS" variable created for that kernel.
70// - Offset table:
71// Offset table will have multiple rows and columns.
72// Rows are assumed to be from 0 to (n-1). n is total number
73// of kernels accessing the LDS through non-kernels.
74// Each row will have m elements. m is the total number of
75// unique LDS globals accessed by all non-kernels.
76// Each element in the row corresponds to the ptr of
77// the replacement of LDS global done by that particular kernel.
78// A LDS variable in non-kernel will be replaced based on the information
79// from base and offset tables. Based on kernel-id query, ptr of "SW
80// LDS" for that corresponding kernel is obtained from base table.
81// The Offset into the base "SW LDS" is obtained from
82// corresponding element in offset table. With this information, replacement
83// value is obtained.
84//===----------------------------------------------------------------------===//
85
86#include "AMDGPU.h"
88#include "AMDGPUMemoryUtils.h"
89#include "AMDGPUTargetMachine.h"
90#include "llvm/ADT/DenseMap.h"
91#include "llvm/ADT/DenseSet.h"
92#include "llvm/ADT/SetVector.h"
94#include "llvm/ADT/StringRef.h"
98#include "llvm/IR/Constants.h"
99#include "llvm/IR/DIBuilder.h"
100#include "llvm/IR/DebugInfo.h"
102#include "llvm/IR/IRBuilder.h"
103#include "llvm/IR/Instructions.h"
104#include "llvm/IR/IntrinsicsAMDGPU.h"
105#include "llvm/IR/MDBuilder.h"
107#include "llvm/Pass.h"
111
112#include <algorithm>
113
114#define DEBUG_TYPE "amdgpu-sw-lower-lds"
115#define COV5_HIDDEN_DYN_LDS_SIZE_ARG 15
116
117using namespace llvm;
118using namespace AMDGPU;
119
120namespace {
121
123 AsanInstrumentLDS("amdgpu-asan-instrument-lds",
124 cl::desc("Run asan instrumentation on LDS instructions "
125 "lowered to global memory"),
126 cl::init(true), cl::Hidden);
127
128using DomTreeCallback = function_ref<DominatorTree *(Function &F)>;
129
130struct LDSAccessTypeInfo {
131 SetVector<GlobalVariable *> StaticLDSGlobals;
132 SetVector<GlobalVariable *> DynamicLDSGlobals;
133};
134
135// Struct to hold all the Metadata required for a kernel
136// to replace a LDS global uses with corresponding offset
137// in to device global memory.
138struct KernelLDSParameters {
139 GlobalVariable *SwLDS = nullptr;
140 GlobalVariable *SwDynLDS = nullptr;
141 GlobalVariable *SwLDSMetadata = nullptr;
142 LDSAccessTypeInfo DirectAccess;
143 LDSAccessTypeInfo IndirectAccess;
145 LDSToReplacementIndicesMap;
146 uint32_t MallocSize = 0;
147 uint32_t LDSSize = 0;
148 SmallVector<std::pair<uint32_t, uint32_t>, 64> RedzoneOffsetAndSizeVector;
149};
150
151// Struct to store information for creation of offset table
152// for all the non-kernel LDS accesses.
153struct NonKernelLDSParameters {
154 GlobalVariable *LDSBaseTable = nullptr;
155 GlobalVariable *LDSOffsetTable = nullptr;
156 SetVector<Function *> OrderedKernels;
157 SetVector<GlobalVariable *> OrdereLDSGlobals;
158};
159
160struct AsanInstrumentInfo {
161 int Scale = 0;
162 uint32_t Offset = 0;
163 SetVector<Instruction *> Instructions;
164};
165
166struct FunctionsAndLDSAccess {
167 DenseMap<Function *, KernelLDSParameters> KernelToLDSParametersMap;
168 SetVector<Function *> KernelsWithIndirectLDSAccess;
169 SetVector<Function *> NonKernelsWithLDSArgument;
170 SetVector<GlobalVariable *> AllNonKernelLDSAccess;
171 FunctionVariableMap NonKernelToLDSAccessMap;
172};
173
174class AMDGPUSwLowerLDS {
175public:
176 AMDGPUSwLowerLDS(Module &Mod, const AMDGPUTargetMachine &TM,
177 DomTreeCallback Callback)
178 : M(Mod), AMDGPUTM(TM), IRB(M.getContext()), DTCallback(Callback) {}
179 bool run();
180 void getUsesOfLDSByNonKernels();
181 void getNonKernelsWithLDSArguments(const CallGraph &CG);
183 getOrderedIndirectLDSAccessingKernels(SetVector<Function *> &Kernels);
185 getOrderedNonKernelAllLDSGlobals(SetVector<GlobalVariable *> &Variables);
186 void buildSwLDSGlobal(Function *Func);
187 void buildSwDynLDSGlobal(Function *Func);
188 void populateSwMetadataGlobal(Function *Func);
189 void populateSwLDSAttributeAndMetadata(Function *Func);
190 void populateLDSToReplacementIndicesMap(Function *Func);
191 void getLDSMemoryInstructions(Function *Func,
192 SetVector<Instruction *> &LDSInstructions);
193 void replaceKernelLDSAccesses(Function *Func);
194 Value *getTranslatedGlobalMemoryPtrOfLDS(Value *LoadMallocPtr, Value *LDSPtr);
195 void translateLDSMemoryOperationsToGlobalMemory(
196 Function *Func, Value *LoadMallocPtr,
197 SetVector<Instruction *> &LDSInstructions);
198 void poisonRedzones(Function *Func, Value *MallocPtr);
199 void lowerKernelLDSAccesses(Function *Func, DomTreeUpdater &DTU);
200 void buildNonKernelLDSOffsetTable(NonKernelLDSParameters &NKLDSParams);
201 void buildNonKernelLDSBaseTable(NonKernelLDSParameters &NKLDSParams);
202 Constant *
203 getAddressesOfVariablesInKernel(Function *Func,
204 SetVector<GlobalVariable *> &Variables);
205 void lowerNonKernelLDSAccesses(Function *Func,
206 SetVector<GlobalVariable *> &LDSGlobals,
207 NonKernelLDSParameters &NKLDSParams);
208 void
209 updateMallocSizeForDynamicLDS(Function *Func, Value **CurrMallocSize,
210 Value *HiddenDynLDSSize,
211 SetVector<GlobalVariable *> &DynamicLDSGlobals);
212 void initAsanInfo();
213
214private:
215 Module &M;
216 const AMDGPUTargetMachine &AMDGPUTM;
217 IRBuilder<> IRB;
218 DomTreeCallback DTCallback;
219 FunctionsAndLDSAccess FuncLDSAccessInfo;
220 AsanInstrumentInfo AsanInfo;
221};
222
223template <typename T> SetVector<T> sortByName(std::vector<T> &&V) {
224 // Sort the vector of globals or Functions based on their name.
225 // Returns a SetVector of globals/Functions.
226 sort(V, [](const auto *L, const auto *R) {
227 return L->getName() < R->getName();
228 });
229 return {SetVector<T>(llvm::from_range, V)};
230}
231
232SetVector<GlobalVariable *> AMDGPUSwLowerLDS::getOrderedNonKernelAllLDSGlobals(
233 SetVector<GlobalVariable *> &Variables) {
234 // Sort all the non-kernel LDS accesses based on their name.
235 return sortByName(
236 std::vector<GlobalVariable *>(Variables.begin(), Variables.end()));
237}
238
239SetVector<Function *> AMDGPUSwLowerLDS::getOrderedIndirectLDSAccessingKernels(
240 SetVector<Function *> &Kernels) {
241 // Sort the non-kernels accessing LDS based on their name.
242 // Also assign a kernel ID metadata based on the sorted order.
243 LLVMContext &Ctx = M.getContext();
244 if (Kernels.size() > UINT32_MAX) {
245 report_fatal_error("Unimplemented SW LDS lowering for > 2**32 kernels");
246 }
247 SetVector<Function *> OrderedKernels =
248 sortByName(std::vector<Function *>(Kernels.begin(), Kernels.end()));
249 for (size_t i = 0; i < Kernels.size(); i++) {
250 Metadata *AttrMDArgs[1] = {
252 };
253 Function *Func = OrderedKernels[i];
254 Func->setMetadata("llvm.amdgcn.lds.kernel.id",
255 MDNode::get(Ctx, AttrMDArgs));
256 }
257 return OrderedKernels;
258}
259
260void AMDGPUSwLowerLDS::getNonKernelsWithLDSArguments(const CallGraph &CG) {
261 // Among the kernels accessing LDS, get list of
262 // Non-kernels to which a call is made and a ptr
263 // to addrspace(3) is passed as argument.
264 for (auto &K : FuncLDSAccessInfo.KernelToLDSParametersMap) {
265 Function *Func = K.first;
266 const CallGraphNode *CGN = CG[Func];
267 if (!CGN)
268 continue;
269 for (auto &I : *CGN) {
270 CallGraphNode *CallerCGN = I.second;
271 Function *CalledFunc = CallerCGN->getFunction();
272 if (!CalledFunc || CalledFunc->isDeclaration())
273 continue;
274 if (AMDGPU::isKernel(*CalledFunc))
275 continue;
276 for (auto AI = CalledFunc->arg_begin(), E = CalledFunc->arg_end();
277 AI != E; ++AI) {
278 Type *ArgTy = (*AI).getType();
279 if (!ArgTy->isPointerTy())
280 continue;
282 continue;
283 FuncLDSAccessInfo.NonKernelsWithLDSArgument.insert(CalledFunc);
284 // Also add the Calling function to KernelsWithIndirectLDSAccess list
285 // so that base table of LDS is generated.
286 FuncLDSAccessInfo.KernelsWithIndirectLDSAccess.insert(Func);
287 }
288 }
289 }
290}
291
292void AMDGPUSwLowerLDS::getUsesOfLDSByNonKernels() {
293 for (GlobalVariable *GV : FuncLDSAccessInfo.AllNonKernelLDSAccess) {
295 continue;
296
297 for (User *V : GV->users()) {
298 if (auto *I = dyn_cast<Instruction>(V)) {
299 Function *F = I->getFunction();
300 if (!isKernel(*F) && !F->isDeclaration())
301 FuncLDSAccessInfo.NonKernelToLDSAccessMap[F].insert(GV);
302 }
303 }
304 }
305}
306
307static void recordLDSAbsoluteAddress(Module &M, GlobalVariable *GV,
308 uint32_t Address) {
309 // Write the specified address into metadata where it can be retrieved by
310 // the assembler. Format is a half open range, [Address Address+1)
311 LLVMContext &Ctx = M.getContext();
312 auto *IntTy = M.getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
313 MDBuilder MDB(Ctx);
314 MDNode *MetadataNode = MDB.createRange(ConstantInt::get(IntTy, Address),
315 ConstantInt::get(IntTy, Address + 1));
316 GV->setMetadata(LLVMContext::MD_absolute_symbol, MetadataNode);
317}
318
319static void addLDSSizeAttribute(Function *Func, uint32_t Offset,
320 bool IsDynLDS) {
321 if (Offset != 0) {
322 std::string Buffer;
323 raw_string_ostream SS{Buffer};
324 SS << Offset;
325 if (IsDynLDS)
326 SS << "," << Offset;
327 Func->addFnAttr("amdgpu-lds-size", Buffer);
328 }
329}
330
331static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
332 BasicBlock *Entry = &Func->getEntryBlock();
333 IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
334
335 Function *Decl = Intrinsic::getOrInsertDeclaration(Func->getParent(),
336 Intrinsic::donothing, {});
337
338 Value *UseInstance[1] = {
339 Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
340
341 Builder.CreateCall(Decl, {},
342 {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
343}
344
345void AMDGPUSwLowerLDS::buildSwLDSGlobal(Function *Func) {
346 // Create new LDS global required for each kernel to store
347 // device global memory pointer.
348 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
349 // Create new global pointer variable
350 LDSParams.SwLDS = new GlobalVariable(
351 M, IRB.getPtrTy(), false, GlobalValue::InternalLinkage,
352 PoisonValue::get(IRB.getPtrTy()), "llvm.amdgcn.sw.lds." + Func->getName(),
355 MD.NoAddress = true;
356 LDSParams.SwLDS->setSanitizerMetadata(MD);
357}
358
359void AMDGPUSwLowerLDS::buildSwDynLDSGlobal(Function *Func) {
360 // Create new Dyn LDS global if kernel accesses dyn LDS.
361 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
362 if (LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
363 LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
364 return;
365 // Create new global pointer variable
366 auto *emptyCharArray = ArrayType::get(IRB.getInt8Ty(), 0);
367 LDSParams.SwDynLDS = new GlobalVariable(
368 M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
369 "llvm.amdgcn." + Func->getName() + ".dynlds", nullptr,
371 markUsedByKernel(Func, LDSParams.SwDynLDS);
373 MD.NoAddress = true;
374 LDSParams.SwDynLDS->setSanitizerMetadata(MD);
375}
376
377void AMDGPUSwLowerLDS::populateSwLDSAttributeAndMetadata(Function *Func) {
378 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
379 bool IsDynLDSUsed = LDSParams.SwDynLDS;
380 uint32_t Offset = LDSParams.LDSSize;
381 recordLDSAbsoluteAddress(M, LDSParams.SwLDS, 0);
382 addLDSSizeAttribute(Func, Offset, IsDynLDSUsed);
383 if (LDSParams.SwDynLDS)
384 recordLDSAbsoluteAddress(M, LDSParams.SwDynLDS, Offset);
385}
386
387void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
388 // Create new metadata global for every kernel and initialize the
389 // start offsets and sizes corresponding to each LDS accesses.
390 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
391 auto &Ctx = M.getContext();
392 auto &DL = M.getDataLayout();
393 std::vector<Type *> Items;
394 Type *Int32Ty = IRB.getInt32Ty();
395 std::vector<Constant *> Initializers;
396 Align MaxAlignment(1);
397 auto UpdateMaxAlignment = [&MaxAlignment, &DL](GlobalVariable *GV) {
398 Align GVAlign = AMDGPU::getAlign(DL, GV);
399 MaxAlignment = std::max(MaxAlignment, GVAlign);
400 };
401
402 for (GlobalVariable *GV : LDSParams.DirectAccess.StaticLDSGlobals)
403 UpdateMaxAlignment(GV);
404
405 for (GlobalVariable *GV : LDSParams.DirectAccess.DynamicLDSGlobals)
406 UpdateMaxAlignment(GV);
407
408 for (GlobalVariable *GV : LDSParams.IndirectAccess.StaticLDSGlobals)
409 UpdateMaxAlignment(GV);
410
411 for (GlobalVariable *GV : LDSParams.IndirectAccess.DynamicLDSGlobals)
412 UpdateMaxAlignment(GV);
413
414 //{StartOffset, AlignedSizeInBytes}
415 SmallString<128> MDItemStr;
416 raw_svector_ostream MDItemOS(MDItemStr);
417 MDItemOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.item";
418
419 StructType *LDSItemTy =
420 StructType::create(Ctx, {Int32Ty, Int32Ty, Int32Ty}, MDItemOS.str());
421 uint32_t &MallocSize = LDSParams.MallocSize;
422 SetVector<GlobalVariable *> UniqueLDSGlobals;
423 int AsanScale = AsanInfo.Scale;
424 auto buildInitializerForSwLDSMD =
425 [&](SetVector<GlobalVariable *> &LDSGlobals) {
426 for (auto &GV : LDSGlobals) {
427 if (is_contained(UniqueLDSGlobals, GV))
428 continue;
429 UniqueLDSGlobals.insert(GV);
430
431 Type *Ty = GV->getValueType();
432 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
433 Items.push_back(LDSItemTy);
434 Constant *ItemStartOffset = ConstantInt::get(Int32Ty, MallocSize);
435 Constant *SizeInBytesConst = ConstantInt::get(Int32Ty, SizeInBytes);
436 // Get redzone size corresponding a size.
437 const uint64_t RightRedzoneSize =
438 AMDGPU::getRedzoneSizeForGlobal(AsanScale, SizeInBytes);
439 // Update MallocSize with current size and redzone size.
440 MallocSize += SizeInBytes;
441 if (!AMDGPU::isDynamicLDS(*GV))
442 LDSParams.RedzoneOffsetAndSizeVector.emplace_back(MallocSize,
443 RightRedzoneSize);
444 MallocSize += RightRedzoneSize;
445 // Align current size plus redzone.
446 uint64_t AlignedSize =
447 alignTo(SizeInBytes + RightRedzoneSize, MaxAlignment);
448 Constant *AlignedSizeInBytesConst =
449 ConstantInt::get(Int32Ty, AlignedSize);
450 // Align MallocSize
451 MallocSize = alignTo(MallocSize, MaxAlignment);
452 Constant *InitItem =
453 ConstantStruct::get(LDSItemTy, {ItemStartOffset, SizeInBytesConst,
454 AlignedSizeInBytesConst});
455 Initializers.push_back(InitItem);
456 }
457 };
458 SetVector<GlobalVariable *> SwLDSVector;
459 SwLDSVector.insert(LDSParams.SwLDS);
460 buildInitializerForSwLDSMD(SwLDSVector);
461 buildInitializerForSwLDSMD(LDSParams.DirectAccess.StaticLDSGlobals);
462 buildInitializerForSwLDSMD(LDSParams.IndirectAccess.StaticLDSGlobals);
463 buildInitializerForSwLDSMD(LDSParams.DirectAccess.DynamicLDSGlobals);
464 buildInitializerForSwLDSMD(LDSParams.IndirectAccess.DynamicLDSGlobals);
465
466 // Update the LDS size used by the kernel.
467 Type *Ty = LDSParams.SwLDS->getValueType();
468 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
469 uint64_t AlignedSize = alignTo(SizeInBytes, MaxAlignment);
470 LDSParams.LDSSize = AlignedSize;
471 SmallString<128> MDTypeStr;
472 raw_svector_ostream MDTypeOS(MDTypeStr);
473 MDTypeOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.type";
474 StructType *MetadataStructType =
475 StructType::create(Ctx, Items, MDTypeOS.str());
476 SmallString<128> MDStr;
477 raw_svector_ostream MDOS(MDStr);
478 MDOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md";
479 LDSParams.SwLDSMetadata = new GlobalVariable(
480 M, MetadataStructType, false, GlobalValue::InternalLinkage,
481 PoisonValue::get(MetadataStructType), MDOS.str(), nullptr,
483 Constant *data = ConstantStruct::get(MetadataStructType, Initializers);
484 LDSParams.SwLDSMetadata->setInitializer(data);
485 assert(LDSParams.SwLDS);
486 // Set the alignment to MaxAlignment for SwLDS.
487 LDSParams.SwLDS->setAlignment(MaxAlignment);
488 if (LDSParams.SwDynLDS)
489 LDSParams.SwDynLDS->setAlignment(MaxAlignment);
491 MD.NoAddress = true;
492 LDSParams.SwLDSMetadata->setSanitizerMetadata(MD);
493}
494
495void AMDGPUSwLowerLDS::populateLDSToReplacementIndicesMap(Function *Func) {
496 // Fill the corresponding LDS replacement indices for each LDS access
497 // related to this kernel.
498 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
499 SetVector<GlobalVariable *> UniqueLDSGlobals;
500 auto PopulateIndices = [&](SetVector<GlobalVariable *> &LDSGlobals,
501 uint32_t &Idx) {
502 for (auto &GV : LDSGlobals) {
503 if (is_contained(UniqueLDSGlobals, GV))
504 continue;
505 UniqueLDSGlobals.insert(GV);
506 LDSParams.LDSToReplacementIndicesMap[GV] = {0, Idx, 0};
507 ++Idx;
508 }
509 };
510 uint32_t Idx = 0;
511 SetVector<GlobalVariable *> SwLDSVector;
512 SwLDSVector.insert(LDSParams.SwLDS);
513 PopulateIndices(SwLDSVector, Idx);
514 PopulateIndices(LDSParams.DirectAccess.StaticLDSGlobals, Idx);
515 PopulateIndices(LDSParams.IndirectAccess.StaticLDSGlobals, Idx);
516 PopulateIndices(LDSParams.DirectAccess.DynamicLDSGlobals, Idx);
517 PopulateIndices(LDSParams.IndirectAccess.DynamicLDSGlobals, Idx);
518}
519
520static void replacesUsesOfGlobalInFunction(Function *Func, GlobalVariable *GV,
521 Value *Replacement) {
522 // Replace all uses of LDS global in this Function with a Replacement.
523 auto ReplaceUsesLambda = [Func](const Use &U) -> bool {
524 auto *V = U.getUser();
525 if (auto *Inst = dyn_cast<Instruction>(V)) {
526 auto *Func1 = Inst->getFunction();
527 if (Func == Func1)
528 return true;
529 }
530 return false;
531 };
532 GV->replaceUsesWithIf(Replacement, ReplaceUsesLambda);
533}
534
535void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
536 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
537 GlobalVariable *SwLDS = LDSParams.SwLDS;
538 assert(SwLDS);
539 GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
540 assert(SwLDSMetadata);
541 StructType *SwLDSMetadataStructType =
542 cast<StructType>(SwLDSMetadata->getValueType());
543 Type *Int32Ty = IRB.getInt32Ty();
544 auto &IndirectAccess = LDSParams.IndirectAccess;
545 auto &DirectAccess = LDSParams.DirectAccess;
546 // Replace all uses of LDS global in this Function with a Replacement.
547 SetVector<GlobalVariable *> UniqueLDSGlobals;
548 auto ReplaceLDSGlobalUses = [&](SetVector<GlobalVariable *> &LDSGlobals) {
549 for (auto &GV : LDSGlobals) {
550 // Do not generate instructions if LDS access is in non-kernel
551 // i.e indirect-access.
552 if ((IndirectAccess.StaticLDSGlobals.contains(GV) ||
553 IndirectAccess.DynamicLDSGlobals.contains(GV)) &&
554 (!DirectAccess.StaticLDSGlobals.contains(GV) &&
555 !DirectAccess.DynamicLDSGlobals.contains(GV)))
556 continue;
557 if (is_contained(UniqueLDSGlobals, GV))
558 continue;
559 UniqueLDSGlobals.insert(GV);
560 auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
561 assert(Indices.size() == 3);
562 Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, Indices[0]),
563 ConstantInt::get(Int32Ty, Indices[1]),
564 ConstantInt::get(Int32Ty, Indices[2])};
566 SwLDSMetadataStructType, SwLDSMetadata, GEPIdx, true);
568 Value *BasePlusOffset =
569 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), SwLDS, {Offset});
570 LLVM_DEBUG(GV->printAsOperand(dbgs() << "Sw LDS Lowering, Replacing LDS ",
571 false));
572 replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
573 }
574 };
575 ReplaceLDSGlobalUses(DirectAccess.StaticLDSGlobals);
576 ReplaceLDSGlobalUses(IndirectAccess.StaticLDSGlobals);
577 ReplaceLDSGlobalUses(DirectAccess.DynamicLDSGlobals);
578 ReplaceLDSGlobalUses(IndirectAccess.DynamicLDSGlobals);
579}
580
581void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
582 Function *Func, Value **CurrMallocSize, Value *HiddenDynLDSSize,
583 SetVector<GlobalVariable *> &DynamicLDSGlobals) {
584 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
585 Type *Int32Ty = IRB.getInt32Ty();
586
587 GlobalVariable *SwLDS = LDSParams.SwLDS;
588 GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
589 assert(SwLDS && SwLDSMetadata);
590 StructType *MetadataStructType =
591 cast<StructType>(SwLDSMetadata->getValueType());
592 unsigned MaxAlignment = SwLDS->getAlignment();
593 Value *MaxAlignValue = IRB.getInt32(MaxAlignment);
594 Value *MaxAlignValueMinusOne = IRB.getInt32(MaxAlignment - 1);
595
596 for (GlobalVariable *DynGV : DynamicLDSGlobals) {
597 auto &Indices = LDSParams.LDSToReplacementIndicesMap[DynGV];
598 // Update the Offset metadata.
599 Constant *Index0 = ConstantInt::get(Int32Ty, 0);
600 Constant *Index1 = ConstantInt::get(Int32Ty, Indices[1]);
601
602 Constant *Index2Offset = ConstantInt::get(Int32Ty, 0);
603 auto *GEPForOffset = IRB.CreateInBoundsGEP(
604 MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2Offset});
605
606 IRB.CreateStore(*CurrMallocSize, GEPForOffset);
607 // Update the size and Aligned Size metadata.
608 Constant *Index2Size = ConstantInt::get(Int32Ty, 1);
609 auto *GEPForSize = IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
610 {Index0, Index1, Index2Size});
611
612 Value *CurrDynLDSSize = IRB.CreateLoad(Int32Ty, HiddenDynLDSSize);
613 IRB.CreateStore(CurrDynLDSSize, GEPForSize);
614 Constant *Index2AlignedSize = ConstantInt::get(Int32Ty, 2);
615 auto *GEPForAlignedSize = IRB.CreateInBoundsGEP(
616 MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2AlignedSize});
617
618 Value *AlignedDynLDSSize =
619 IRB.CreateAdd(CurrDynLDSSize, MaxAlignValueMinusOne);
620 AlignedDynLDSSize = IRB.CreateUDiv(AlignedDynLDSSize, MaxAlignValue);
621 AlignedDynLDSSize = IRB.CreateMul(AlignedDynLDSSize, MaxAlignValue);
622 IRB.CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
623
624 // Update the Current Malloc Size
625 *CurrMallocSize = IRB.CreateAdd(*CurrMallocSize, AlignedDynLDSSize);
626 }
627}
628
629static DebugLoc getOrCreateDebugLoc(const Instruction *InsertBefore,
630 DISubprogram *SP) {
631 assert(InsertBefore);
632 if (InsertBefore->getDebugLoc())
633 return InsertBefore->getDebugLoc();
634 if (SP)
635 return DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
636 return DebugLoc();
637}
638
639void AMDGPUSwLowerLDS::getLDSMemoryInstructions(
640 Function *Func, SetVector<Instruction *> &LDSInstructions) {
641 for (BasicBlock &BB : *Func) {
642 for (Instruction &Inst : BB) {
643 if (LoadInst *LI = dyn_cast<LoadInst>(&Inst)) {
644 if (LI->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
645 LDSInstructions.insert(&Inst);
646 } else if (StoreInst *SI = dyn_cast<StoreInst>(&Inst)) {
647 if (SI->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
648 LDSInstructions.insert(&Inst);
649 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&Inst)) {
650 if (RMW->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
651 LDSInstructions.insert(&Inst);
652 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(&Inst)) {
653 if (XCHG->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
654 LDSInstructions.insert(&Inst);
655 } else if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&Inst)) {
656 if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
657 ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS)
658 LDSInstructions.insert(&Inst);
659 } else
660 continue;
661 }
662 }
663}
664
665Value *AMDGPUSwLowerLDS::getTranslatedGlobalMemoryPtrOfLDS(Value *LoadMallocPtr,
666 Value *LDSPtr) {
667 assert(LDSPtr && "Invalid LDS pointer operand");
668 Type *LDSPtrType = LDSPtr->getType();
669 LLVMContext &Ctx = M.getContext();
670 const DataLayout &DL = M.getDataLayout();
671 Type *IntTy = DL.getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
672 if (auto *VecPtrTy = dyn_cast<VectorType>(LDSPtrType)) {
673 // Handle vector of pointers
674 ElementCount NumElements = VecPtrTy->getElementCount();
675 IntTy = VectorType::get(IntTy, NumElements);
676 }
677 Value *GepIndex = IRB.CreatePtrToInt(LDSPtr, IntTy);
678 return IRB.CreateInBoundsGEP(IRB.getInt8Ty(), LoadMallocPtr, {GepIndex});
679}
680
681void AMDGPUSwLowerLDS::translateLDSMemoryOperationsToGlobalMemory(
682 Function *Func, Value *LoadMallocPtr,
683 SetVector<Instruction *> &LDSInstructions) {
684 LLVM_DEBUG(dbgs() << "Translating LDS memory operations to global memory : "
685 << Func->getName());
686 for (Instruction *Inst : LDSInstructions) {
687 IRB.SetInsertPoint(Inst);
688 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
689 Value *LIOperand = LI->getPointerOperand();
690 Value *Replacement =
691 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, LIOperand);
692 LoadInst *NewLI = IRB.CreateAlignedLoad(LI->getType(), Replacement,
693 LI->getAlign(), LI->isVolatile());
694 NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
695 AsanInfo.Instructions.insert(NewLI);
696 LI->replaceAllUsesWith(NewLI);
697 LI->eraseFromParent();
698 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
699 Value *SIOperand = SI->getPointerOperand();
700 Value *Replacement =
701 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, SIOperand);
702 StoreInst *NewSI = IRB.CreateAlignedStore(
703 SI->getValueOperand(), Replacement, SI->getAlign(), SI->isVolatile());
704 NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
705 AsanInfo.Instructions.insert(NewSI);
706 SI->replaceAllUsesWith(NewSI);
707 SI->eraseFromParent();
708 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
709 Value *RMWPtrOperand = RMW->getPointerOperand();
710 Value *RMWValOperand = RMW->getValOperand();
711 Value *Replacement =
712 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, RMWPtrOperand);
713 AtomicRMWInst *NewRMW = IRB.CreateAtomicRMW(
714 RMW->getOperation(), Replacement, RMWValOperand, RMW->getAlign(),
715 RMW->getOrdering(), RMW->getSyncScopeID());
716 NewRMW->setVolatile(RMW->isVolatile());
717 AsanInfo.Instructions.insert(NewRMW);
718 RMW->replaceAllUsesWith(NewRMW);
719 RMW->eraseFromParent();
720 } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(Inst)) {
721 Value *XCHGPtrOperand = XCHG->getPointerOperand();
722 Value *Replacement =
723 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, XCHGPtrOperand);
725 Replacement, XCHG->getCompareOperand(), XCHG->getNewValOperand(),
726 XCHG->getAlign(), XCHG->getSuccessOrdering(),
727 XCHG->getFailureOrdering(), XCHG->getSyncScopeID());
728 NewXCHG->setVolatile(XCHG->isVolatile());
729 AsanInfo.Instructions.insert(NewXCHG);
730 XCHG->replaceAllUsesWith(NewXCHG);
731 XCHG->eraseFromParent();
732 } else if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(Inst)) {
733 Value *AIOperand = ASC->getPointerOperand();
734 Value *Replacement =
735 getTranslatedGlobalMemoryPtrOfLDS(LoadMallocPtr, AIOperand);
736 Value *NewAI = IRB.CreateAddrSpaceCast(Replacement, ASC->getType());
737 // Note: No need to add the instruction to AsanInfo instructions to be
738 // instrumented list. FLAT_ADDRESS ptr would have been already
739 // instrumented by asan pass prior to this pass.
740 ASC->replaceAllUsesWith(NewAI);
741 ASC->eraseFromParent();
742 } else
743 report_fatal_error("Unimplemented LDS lowering instruction");
744 }
745}
746
747void AMDGPUSwLowerLDS::poisonRedzones(Function *Func, Value *MallocPtr) {
748 auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
749 Type *Int64Ty = IRB.getInt64Ty();
750 Type *VoidTy = IRB.getVoidTy();
751 FunctionCallee AsanPoisonRegion = M.getOrInsertFunction(
752 "__asan_poison_region",
753 FunctionType::get(VoidTy, {Int64Ty, Int64Ty}, false));
754
755 auto RedzonesVec = LDSParams.RedzoneOffsetAndSizeVector;
756 size_t VecSize = RedzonesVec.size();
757 for (unsigned i = 0; i < VecSize; i++) {
758 auto &RedzonePair = RedzonesVec[i];
759 uint64_t RedzoneOffset = RedzonePair.first;
760 uint64_t RedzoneSize = RedzonePair.second;
761 Value *RedzoneAddrOffset = IRB.CreateInBoundsGEP(
762 IRB.getInt8Ty(), MallocPtr, {IRB.getInt64(RedzoneOffset)});
763 Value *RedzoneAddress = IRB.CreatePtrToInt(RedzoneAddrOffset, Int64Ty);
764 IRB.CreateCall(AsanPoisonRegion,
765 {RedzoneAddress, IRB.getInt64(RedzoneSize)});
766 }
767}
768
// Rewrite kernel \p Func so that its LDS uses are served out of a device
// global-memory buffer allocated at kernel entry. Resulting CFG:
//   WId block:    computes (workitem_id_x | y | z); only the {0,0,0}
//                 workitem branches to the Malloc block, all others branch
//                 to the original entry block.
//   Malloc block: computes total static (+ dynamic, if any) LDS size from
//                 the SW LDS metadata, calls __asan_malloc_impl, stores the
//                 returned pointer into the SW LDS global, poisons redzones.
//   Entry block:  starts with an s_barrier so every workitem waits for the
//                 allocation, then LDS uses are replaced and LDS memory
//                 operations are redirected to global memory.
//   CondFree/Free/End: every return is routed through a barrier; only the
//                 {0,0,0} workitem frees the buffer via __asan_free_impl.
// \p DTU receives the newly created CFG edges.
void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
                                              DomTreeUpdater &DTU) {
  LLVM_DEBUG(dbgs() << "Sw Lowering Kernel LDS for : " << Func->getName());
  auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
  auto &Ctx = M.getContext();
  auto *PrevEntryBlock = &Func->getEntryBlock();
  SetVector<Instruction *> LDSInstructions;
  getLDSMemoryInstructions(Func, LDSInstructions);
  const DataLayout &DL = M.getDataLayout();

  // Create malloc block.
  auto *MallocBlock = BasicBlock::Create(Ctx, "Malloc", Func, PrevEntryBlock);

  // Create WIdBlock block which has instructions related to selection of
  // {0,0,0} index work item in the work group.
  auto *WIdBlock = BasicBlock::Create(Ctx, "WId", Func, MallocBlock);

  // Move constant-size allocas from the original entry block to the new entry
  // block (WIdBlock) so they remain static allocas. Splice the leading cluster
  // in bulk, then move any stragglers that are interleaved with other
  // instructions.
  auto SplitIt = PrevEntryBlock->getFirstNonPHIOrDbgOrAlloca();
  WIdBlock->splice(WIdBlock->end(), PrevEntryBlock, PrevEntryBlock->begin(),
                   SplitIt);
  for (Instruction &I : make_early_inc_range(*PrevEntryBlock))
    if (auto *AI = dyn_cast<AllocaInst>(&I))
      if (isa<ConstantInt>(AI->getArraySize()))
        AI->moveBefore(*WIdBlock, WIdBlock->end());

  IRB.SetInsertPoint(WIdBlock, WIdBlock->end());
  DebugLoc FirstDL =
      getOrCreateDebugLoc(&*PrevEntryBlock->begin(), Func->getSubprogram());
  IRB.SetCurrentDebugLocation(FirstDL);
  Value *WIdx = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
  Value *WIdy = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_y, {});
  Value *WIdz = IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_z, {});
  // (x | y | z) == 0 holds exactly for the {0,0,0} work item.
  Value *XYOr = IRB.CreateOr(WIdx, WIdy);
  Value *XYZOr = IRB.CreateOr(XYOr, WIdz);
  Value *WIdzCond = IRB.CreateICmpEQ(XYZOr, IRB.getInt32(0));

  // All work items will branch to PrevEntryBlock except {0,0,0} index
  // work item which will branch to malloc block.
  IRB.CreateCondBr(WIdzCond, MallocBlock, PrevEntryBlock);

  // Malloc block
  IRB.SetInsertPoint(MallocBlock, MallocBlock->begin());

  // If Dynamic LDS globals are accessed by the kernel,
  // Get the size of dyn lds from hidden dyn_lds_size kernel arg.
  // Update the corresponding metadata global entries for this dyn lds global.
  GlobalVariable *SwLDS = LDSParams.SwLDS;
  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
  assert(SwLDS && SwLDSMetadata);
  StructType *MetadataStructType =
      cast<StructType>(SwLDSMetadata->getValueType());
  uint32_t MallocSize = 0;
  Value *CurrMallocSize;
  Type *Int32Ty = IRB.getInt32Ty();
  Type *Int64Ty = IRB.getInt64Ty();

  SetVector<GlobalVariable *> UniqueLDSGlobals;
  // Dedup helper: accumulates globals into UniqueLDSGlobals, skipping any
  // already present (globals can appear in both direct and indirect sets).
  auto GetUniqueLDSGlobals = [&](SetVector<GlobalVariable *> &LDSGlobals) {
    for (auto &GV : LDSGlobals) {
      if (is_contained(UniqueLDSGlobals, GV))
        continue;
      UniqueLDSGlobals.insert(GV);
    }
  };

  GetUniqueLDSGlobals(LDSParams.DirectAccess.StaticLDSGlobals);
  GetUniqueLDSGlobals(LDSParams.IndirectAccess.StaticLDSGlobals);
  // NOTE(review): the "+ 1" makes NumStaticLDS always >= 1, so the branch
  // below is always taken — presumably the extra metadata entry is for the
  // SW LDS global itself; confirm against populateSwMetadataGlobal.
  unsigned NumStaticLDS = 1 + UniqueLDSGlobals.size();
  UniqueLDSGlobals.clear();

  if (NumStaticLDS) {
    // Total static size = offset of the last static metadata entry (field 0)
    // plus its size (field 2).
    auto *GEPForEndStaticLDSOffset =
        IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
                              {ConstantInt::get(Int32Ty, 0),
                               ConstantInt::get(Int32Ty, NumStaticLDS - 1),
                               ConstantInt::get(Int32Ty, 0)});

    auto *GEPForEndStaticLDSSize =
        IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
                              {ConstantInt::get(Int32Ty, 0),
                               ConstantInt::get(Int32Ty, NumStaticLDS - 1),
                               ConstantInt::get(Int32Ty, 2)});

    Value *EndStaticLDSOffset =
        IRB.CreateLoad(Int32Ty, GEPForEndStaticLDSOffset);
    Value *EndStaticLDSSize = IRB.CreateLoad(Int32Ty, GEPForEndStaticLDSSize);
    CurrMallocSize = IRB.CreateAdd(EndStaticLDSOffset, EndStaticLDSSize);
  } else
    CurrMallocSize = IRB.getInt32(MallocSize);

  if (LDSParams.SwDynLDS) {
        "Dynamic LDS size query is only supported for CO V5 and later.");
    // Get size from hidden dyn_lds_size argument of kernel
        IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {});
    Value *HiddenDynLDSSize = IRB.CreateInBoundsGEP(
        ImplicitArg->getType(), ImplicitArg,
        {ConstantInt::get(Int64Ty, COV5_HIDDEN_DYN_LDS_SIZE_ARG)});
    UniqueLDSGlobals.clear();
    GetUniqueLDSGlobals(LDSParams.DirectAccess.DynamicLDSGlobals);
    GetUniqueLDSGlobals(LDSParams.IndirectAccess.DynamicLDSGlobals);
    updateMallocSizeForDynamicLDS(Func, &CurrMallocSize, HiddenDynLDSSize,
                                  UniqueLDSGlobals);
  }

  CurrMallocSize = IRB.CreateZExt(CurrMallocSize, Int64Ty);

  // Create a call to malloc function which does device global memory allocation
  // with size equals to all LDS global accesses size in this kernel.
  Value *ReturnAddress = IRB.CreateIntrinsic(
      Intrinsic::returnaddress, IRB.getPtrTy(DL.getProgramAddressSpace()),
      {IRB.getInt32(0)});
  FunctionCallee MallocFunc = M.getOrInsertFunction(
      StringRef("__asan_malloc_impl"),
      FunctionType::get(Int64Ty, {Int64Ty, Int64Ty}, false));
  Value *RAPtrToInt = IRB.CreatePtrToInt(ReturnAddress, Int64Ty);
  Value *MallocCall = IRB.CreateCall(MallocFunc, {CurrMallocSize, RAPtrToInt});

  Value *MallocPtr =

  // Create store of malloc to new global
  IRB.CreateStore(MallocPtr, SwLDS);

  // Create calls to __asan_poison_region to poison redzones.
  poisonRedzones(Func, MallocPtr);

  // Create branch to PrevEntryBlock
  IRB.CreateBr(PrevEntryBlock);

  // Create wave-group barrier at the starting of Previous entry block
  Type *Int1Ty = IRB.getInt1Ty();
  IRB.SetInsertPoint(PrevEntryBlock, PrevEntryBlock->begin());
  // The phi records which predecessor we arrived from: true iff this is the
  // {0,0,0} workitem (came via Malloc). Reused below for the free decision.
  auto *XYZCondPhi = IRB.CreatePHI(Int1Ty, 2, "xyzCond");
  XYZCondPhi->addIncoming(IRB.getInt1(0), WIdBlock);
  XYZCondPhi->addIncoming(IRB.getInt1(1), MallocBlock);

  IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {});

  // Load malloc pointer from Sw LDS.
  Value *LoadMallocPtr =

  // Replace All uses of LDS globals with new LDS pointers.
  replaceKernelLDSAccesses(Func);

  // Replace Memory Operations on LDS with corresponding
  // global memory pointers.
  translateLDSMemoryOperationsToGlobalMemory(Func, LoadMallocPtr,
                                             LDSInstructions);

  auto *CondFreeBlock = BasicBlock::Create(Ctx, "CondFree", Func);
  auto *FreeBlock = BasicBlock::Create(Ctx, "Free", Func);
  auto *EndBlock = BasicBlock::Create(Ctx, "End", Func);
  // Redirect every return in the kernel through the CondFree block so the
  // buffer is freed exactly once on kernel exit.
  for (BasicBlock &BB : *Func) {
    if (!BB.empty()) {
      if (ReturnInst *RI = dyn_cast<ReturnInst>(&BB.back())) {
        RI->eraseFromParent();
        IRB.SetInsertPoint(&BB, BB.end());
        IRB.CreateBr(CondFreeBlock);
      }
    }
  }

  // Cond Free Block
  IRB.SetInsertPoint(CondFreeBlock, CondFreeBlock->begin());
  IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {});
  IRB.CreateCondBr(XYZCondPhi, FreeBlock, EndBlock);

  // Free Block
  IRB.SetInsertPoint(FreeBlock, FreeBlock->begin());

  // Free the previously allocate device global memory.
  FunctionCallee AsanFreeFunc = M.getOrInsertFunction(
      StringRef("__asan_free_impl"),
      FunctionType::get(IRB.getVoidTy(), {Int64Ty, Int64Ty}, false));
  Value *ReturnAddr = IRB.CreateIntrinsic(
      Intrinsic::returnaddress, IRB.getPtrTy(DL.getProgramAddressSpace()),
      IRB.getInt32(0));
  Value *RAPToInt = IRB.CreatePtrToInt(ReturnAddr, Int64Ty);
  Value *MallocPtrToInt = IRB.CreatePtrToInt(LoadMallocPtr, Int64Ty);
  IRB.CreateCall(AsanFreeFunc, {MallocPtrToInt, RAPToInt});

  IRB.CreateBr(EndBlock);

  // End Block
  IRB.SetInsertPoint(EndBlock, EndBlock->begin());
  IRB.CreateRetVoid();
  // Update the DomTree with corresponding links to basic blocks.
  DTU.applyUpdates({{DominatorTree::Insert, WIdBlock, MallocBlock},
                    {DominatorTree::Insert, MallocBlock, PrevEntryBlock},
                    {DominatorTree::Insert, CondFreeBlock, FreeBlock},
                    {DominatorTree::Insert, FreeBlock, EndBlock}});
}
969
// For kernel \p Func, build a constant array with one entry per LDS global
// in \p Variables: the address of that global's entry in the kernel's SW LDS
// metadata struct when the kernel replaces the global, otherwise a
// placeholder element (pushed in the "not found" branch below).
Constant *AMDGPUSwLowerLDS::getAddressesOfVariablesInKernel(
    Function *Func, SetVector<GlobalVariable *> &Variables) {
  Type *Int32Ty = IRB.getInt32Ty();
  auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];

  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
  assert(SwLDSMetadata);
  auto *SwLDSMetadataStructType =
      cast<StructType>(SwLDSMetadata->getValueType());
  ArrayType *KernelOffsetsType =

  for (auto *GV : Variables) {
    auto It = LDSParams.LDSToReplacementIndicesMap.find(GV);
    if (It == LDSParams.LDSToReplacementIndicesMap.end()) {
      Elements.push_back(
      continue;
    }
    // The three replacement indices are the GEP indices into the metadata
    // struct for GV's entry.
    auto &Indices = It->second;
    Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, Indices[0]),
                          ConstantInt::get(Int32Ty, Indices[1]),
                          ConstantInt::get(Int32Ty, Indices[2])};
    Constant *GEP = ConstantExpr::getGetElementPtr(SwLDSMetadataStructType,
                                                   SwLDSMetadata, GEPIdx, true);
    Elements.push_back(GEP);
  }
  return ConstantArray::get(KernelOffsetsType, Elements);
}
1000
// Create the constant "llvm.amdgcn.sw.lds.base.table" global used by
// non-kernel functions: a single row indexed by kernel ID whose elements are
// the addresses of each kernel's "SW LDS" global.
void AMDGPUSwLowerLDS::buildNonKernelLDSBaseTable(
    NonKernelLDSParameters &NKLDSParams) {
  // Base table will have single row, with elements of the row
  // placed as per kernel ID. Each element in the row corresponds
  // to address of "SW LDS" global of the kernel.
  auto &Kernels = NKLDSParams.OrderedKernels;
  if (Kernels.empty())
    return;
  const size_t NumberKernels = Kernels.size();
  ArrayType *AllKernelsOffsetsType =
      ArrayType::get(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), NumberKernels);
  std::vector<Constant *> OverallConstantExprElts(NumberKernels);
  for (size_t i = 0; i < NumberKernels; i++) {
    Function *Func = Kernels[i];
    auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
    OverallConstantExprElts[i] = LDSParams.SwLDS;
  }
  Constant *init =
      ConstantArray::get(AllKernelsOffsetsType, OverallConstantExprElts);
  NKLDSParams.LDSBaseTable = new GlobalVariable(
      M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
      "llvm.amdgcn.sw.lds.base.table", nullptr, GlobalValue::NotThreadLocal,
  // NoAddress: exclude this internal table from address sanitization.
  MD.NoAddress = true;
  NKLDSParams.LDSBaseTable->setSanitizerMetadata(MD);
}
1028
// Create the constant "llvm.amdgcn.sw.lds.offset.table" global: one row per
// indirect-LDS-accessing kernel, one column per LDS global used by
// non-kernels; each element is produced by getAddressesOfVariablesInKernel.
void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
    NonKernelLDSParameters &NKLDSParams) {
  // Offset table will have multiple rows and columns.
  // Rows are assumed to be from 0 to (n-1). n is total number
  // of kernels accessing the LDS through non-kernels.
  // Each row will have m elements. m is the total number of
  // unique LDS globals accessed by non-kernels.
  // Each element in the row correspond to the address of
  // the replacement of LDS global done by that particular kernel.
  auto &Variables = NKLDSParams.OrdereLDSGlobals;
  auto &Kernels = NKLDSParams.OrderedKernels;
  if (Variables.empty() || Kernels.empty())
    return;
  const size_t NumberVariables = Variables.size();
  const size_t NumberKernels = Kernels.size();

  ArrayType *KernelOffsetsType =
      ArrayType::get(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), NumberVariables);

  ArrayType *AllKernelsOffsetsType =
      ArrayType::get(KernelOffsetsType, NumberKernels);
  std::vector<Constant *> overallConstantExprElts(NumberKernels);
  for (size_t i = 0; i < NumberKernels; i++) {
    Function *Func = Kernels[i];
    overallConstantExprElts[i] =
        getAddressesOfVariablesInKernel(Func, Variables);
  }
  Constant *Init =
      ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);
  NKLDSParams.LDSOffsetTable = new GlobalVariable(
      M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, Init,
      "llvm.amdgcn.sw.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
  // NoAddress: exclude this internal table from address sanitization.
  MD.NoAddress = true;
  NKLDSParams.LDSOffsetTable->setSanitizerMetadata(MD);
}
1066
1067void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
1068 Function *Func, SetVector<GlobalVariable *> &LDSGlobals,
1069 NonKernelLDSParameters &NKLDSParams) {
1070 // Replace LDS access in non-kernel with replacement queried from
1071 // Base table and offset from offset table.
1072 LLVM_DEBUG(dbgs() << "Sw LDS lowering, lower non-kernel access for : "
1073 << Func->getName());
1074 auto InsertAt = Func->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
1075 IRB.SetInsertPoint(InsertAt);
1076
1077 // Get LDS memory instructions.
1078 SetVector<Instruction *> LDSInstructions;
1079 getLDSMemoryInstructions(Func, LDSInstructions);
1080
1081 auto *KernelId = IRB.CreateIntrinsic(Intrinsic::amdgcn_lds_kernel_id, {});
1082 GlobalVariable *LDSBaseTable = NKLDSParams.LDSBaseTable;
1083 GlobalVariable *LDSOffsetTable = NKLDSParams.LDSOffsetTable;
1084 auto &OrdereLDSGlobals = NKLDSParams.OrdereLDSGlobals;
1085 Value *BaseGEP = IRB.CreateInBoundsGEP(
1086 LDSBaseTable->getValueType(), LDSBaseTable, {IRB.getInt32(0), KernelId});
1087 Value *BaseLoad =
1088 IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), BaseGEP);
1089 Value *LoadMallocPtr =
1090 IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), BaseLoad);
1091
1092 for (GlobalVariable *GV : LDSGlobals) {
1093 const auto *GVIt = llvm::find(OrdereLDSGlobals, GV);
1094 assert(GVIt != OrdereLDSGlobals.end());
1095 uint32_t GVOffset = std::distance(OrdereLDSGlobals.begin(), GVIt);
1096
1097 Value *OffsetGEP = IRB.CreateInBoundsGEP(
1098 LDSOffsetTable->getValueType(), LDSOffsetTable,
1099 {IRB.getInt32(0), KernelId, IRB.getInt32(GVOffset)});
1100 Value *OffsetLoad =
1101 IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), OffsetGEP);
1102 Value *Offset = IRB.CreateLoad(IRB.getInt32Ty(), OffsetLoad);
1103 Value *BasePlusOffset =
1104 IRB.CreateInBoundsGEP(IRB.getInt8Ty(), BaseLoad, {Offset});
1105 LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replace non-kernel LDS for "
1106 << GV->getName());
1107 replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
1108 }
1109 translateLDSMemoryOperationsToGlobalMemory(Func, LoadMallocPtr,
1110 LDSInstructions);
1111}
1112
1113static void reorderStaticDynamicIndirectLDSSet(KernelLDSParameters &LDSParams) {
1114 // Sort Static, dynamic LDS globals which are either
1115 // direct or indirect access on basis of name.
1116 auto &DirectAccess = LDSParams.DirectAccess;
1117 auto &IndirectAccess = LDSParams.IndirectAccess;
1118 LDSParams.DirectAccess.StaticLDSGlobals = sortByName(
1119 std::vector<GlobalVariable *>(DirectAccess.StaticLDSGlobals.begin(),
1120 DirectAccess.StaticLDSGlobals.end()));
1121 LDSParams.DirectAccess.DynamicLDSGlobals = sortByName(
1122 std::vector<GlobalVariable *>(DirectAccess.DynamicLDSGlobals.begin(),
1123 DirectAccess.DynamicLDSGlobals.end()));
1124 LDSParams.IndirectAccess.StaticLDSGlobals = sortByName(
1125 std::vector<GlobalVariable *>(IndirectAccess.StaticLDSGlobals.begin(),
1126 IndirectAccess.StaticLDSGlobals.end()));
1127 LDSParams.IndirectAccess.DynamicLDSGlobals = sortByName(
1128 std::vector<GlobalVariable *>(IndirectAccess.DynamicLDSGlobals.begin(),
1129 IndirectAccess.DynamicLDSGlobals.end()));
1130}
1131
// Query the AddressSanitizer shadow-memory mapping parameters (scale and
// offset) for the target and cache them in AsanInfo; they are used later
// when instrumenting the lowered global-memory accesses.
void AMDGPUSwLowerLDS::initAsanInfo() {
  // Get Shadow mapping scale and offset.
  unsigned LongSize =
      M.getDataLayout().getPointerSizeInBits(AMDGPUAS::GLOBAL_ADDRESS);
  int Scale;
  bool OrShadowOffset;
  // OrShadowOffset is queried but not cached — only scale/offset are kept.
  llvm::getAddressSanitizerParams(AMDGPUTM.getTargetTriple(), LongSize, false,
                                  &Offset, &Scale, &OrShadowOffset);
  AsanInfo.Scale = Scale;
  AsanInfo.Offset = Offset;
}
1144
1145static bool hasFnWithSanitizeAddressAttr(FunctionVariableMap &LDSAccesses) {
1146 for (auto &K : LDSAccesses) {
1147 Function *F = K.first;
1148 if (!F)
1149 continue;
1150 if (F->hasFnAttribute(Attribute::SanitizeAddress))
1151 return true;
1152 }
1153 return false;
1154}
1155
// Pass driver. Gathers per-kernel LDS usage, lowers kernel LDS accesses to
// malloc'ed global memory, lowers non-kernel accesses through the base and
// offset tables, erases dead globals, and finally instruments the new
// global-memory accesses for ASan. Returns true if the module changed.
bool AMDGPUSwLowerLDS::run() {
  bool Changed = false;

  CallGraph CG = CallGraph(M);

  // Get all the direct and indirect access of LDS for all the kernels.
  LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);

  // Flag to decide whether to lower all the LDS accesses
  // based on sanitize_address attribute.
  bool LowerAllLDS = hasFnWithSanitizeAddressAttr(LDSUsesInfo.direct_access) ||
                     hasFnWithSanitizeAddressAttr(LDSUsesInfo.indirect_access);

  if (!LowerAllLDS)
    return Changed;

  // Utility to group LDS access into direct, indirect, static and dynamic.
  auto PopulateKernelStaticDynamicLDS = [&](FunctionVariableMap &LDSAccesses,
                                            bool DirectAccess) {
    for (auto &K : LDSAccesses) {
      Function *F = K.first;
      if (!F || K.second.empty())
        continue;

      assert(isKernel(*F));

      // Only inserts if key isn't already in the map.
      FuncLDSAccessInfo.KernelToLDSParametersMap.insert(
          {F, KernelLDSParameters()});

      auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[F];
      if (!DirectAccess)
        FuncLDSAccessInfo.KernelsWithIndirectLDSAccess.insert(F);
      for (GlobalVariable *GV : K.second) {
        if (!DirectAccess) {
          if (AMDGPU::isDynamicLDS(*GV))
            LDSParams.IndirectAccess.DynamicLDSGlobals.insert(GV);
          else
            LDSParams.IndirectAccess.StaticLDSGlobals.insert(GV);
          // Indirectly accessed globals are the ones non-kernels touch.
          FuncLDSAccessInfo.AllNonKernelLDSAccess.insert(GV);
        } else {
          if (AMDGPU::isDynamicLDS(*GV))
            LDSParams.DirectAccess.DynamicLDSGlobals.insert(GV);
          else
            LDSParams.DirectAccess.StaticLDSGlobals.insert(GV);
        }
      }
    }
  };

  PopulateKernelStaticDynamicLDS(LDSUsesInfo.direct_access, true);
  PopulateKernelStaticDynamicLDS(LDSUsesInfo.indirect_access, false);

  // Get address sanitizer scale.
  initAsanInfo();

  // Lower every kernel that touches LDS directly or indirectly.
  for (auto &K : FuncLDSAccessInfo.KernelToLDSParametersMap) {
    Function *Func = K.first;
    auto &LDSParams = FuncLDSAccessInfo.KernelToLDSParametersMap[Func];
    if (LDSParams.DirectAccess.StaticLDSGlobals.empty() &&
        LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
        LDSParams.IndirectAccess.StaticLDSGlobals.empty() &&
        LDSParams.IndirectAccess.DynamicLDSGlobals.empty()) {
      Changed = false;
    } else {
          CG, Func,
          {"amdgpu-no-workitem-id-x", "amdgpu-no-workitem-id-y",
           "amdgpu-no-workitem-id-z", "amdgpu-no-heap-ptr"});
      // Kernels reached indirectly also need the lds_kernel_id intrinsic.
      if (!LDSParams.IndirectAccess.StaticLDSGlobals.empty() ||
          !LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
        removeFnAttrFromReachable(CG, Func, {"amdgpu-no-lds-kernel-id"});
      reorderStaticDynamicIndirectLDSSet(LDSParams);
      buildSwLDSGlobal(Func);
      buildSwDynLDSGlobal(Func);
      populateSwMetadataGlobal(Func);
      populateSwLDSAttributeAndMetadata(Func);
      populateLDSToReplacementIndicesMap(Func);
      DomTreeUpdater DTU(DTCallback(*Func),
                         DomTreeUpdater::UpdateStrategy::Lazy);
      lowerKernelLDSAccesses(Func, DTU);
      Changed = true;
    }
  }

  // Get the Uses of LDS from non-kernels.
  getUsesOfLDSByNonKernels();

  // Get non-kernels with LDS ptr as argument and called by kernels.
  getNonKernelsWithLDSArguments(CG);

  // Lower LDS accesses in non-kernels.
  if (!FuncLDSAccessInfo.NonKernelToLDSAccessMap.empty() ||
      !FuncLDSAccessInfo.NonKernelsWithLDSArgument.empty()) {
    NonKernelLDSParameters NKLDSParams;
    NKLDSParams.OrderedKernels = getOrderedIndirectLDSAccessingKernels(
        FuncLDSAccessInfo.KernelsWithIndirectLDSAccess);
    NKLDSParams.OrdereLDSGlobals = getOrderedNonKernelAllLDSGlobals(
        FuncLDSAccessInfo.AllNonKernelLDSAccess);
    buildNonKernelLDSBaseTable(NKLDSParams);
    buildNonKernelLDSOffsetTable(NKLDSParams);
    for (auto &K : FuncLDSAccessInfo.NonKernelToLDSAccessMap) {
      Function *Func = K.first;
      DenseSet<GlobalVariable *> &LDSGlobals = K.second;
      // Sort by name so the lowering order is deterministic.
      SetVector<GlobalVariable *> OrderedLDSGlobals = sortByName(
          std::vector<GlobalVariable *>(LDSGlobals.begin(), LDSGlobals.end()));
      lowerNonKernelLDSAccesses(Func, OrderedLDSGlobals, NKLDSParams);
    }
    // Non-kernels that only receive LDS pointers as arguments (and were not
    // handled above) still get their LDS memory operations lowered.
    for (Function *Func : FuncLDSAccessInfo.NonKernelsWithLDSArgument) {
      auto &K = FuncLDSAccessInfo.NonKernelToLDSAccessMap;
      if (K.contains(Func))
        continue;
      lowerNonKernelLDSAccesses(Func, Vec, NKLDSParams);
    }
    Changed = true;
  }

  if (!Changed)
    return Changed;

  // Drop globals made unreferenced by the lowering.
  for (auto &GV : make_early_inc_range(M.globals())) {
    // probably want to remove from used lists
    if (GV.use_empty())
      GV.eraseFromParent();
  }
  }

  if (AsanInstrumentLDS) {
    // Instrument the newly created global-memory accesses for ASan.
    SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
    for (Instruction *Inst : AsanInfo.Instructions) {
      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
      getInterestingMemoryOperands(M, Inst, InterestingOperands);
      llvm::append_range(OperandsToInstrument, InterestingOperands);
    }
    for (auto &Operand : OperandsToInstrument) {
      Value *Addr = Operand.getPtr();
      instrumentAddress(M, IRB, Operand.getInsn(), Operand.getInsn(), Addr,
                        Operand.Alignment.valueOrOne(), Operand.TypeStoreSize,
                        Operand.IsWrite, nullptr, false, false, AsanInfo.Scale,
                        AsanInfo.Offset);
      Changed = true;
    }
  }

  return Changed;
}
1307
// Legacy pass-manager wrapper around the AMDGPUSwLowerLDS implementation.
class AMDGPUSwLowerLDSLegacy : public ModulePass {
public:
  // Target machine; may be null at construction and is then resolved from
  // TargetPassConfig in runOnModule.
  const AMDGPUTargetMachine *AMDGPUTM;
  static char ID;
  AMDGPUSwLowerLDSLegacy(const AMDGPUTargetMachine *TM)
      : ModulePass(ID), AMDGPUTM(TM) {}
  bool runOnModule(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
  }
};
1319} // namespace
1320
// Unique pass identifier; its address is used by the legacy pass manager.
char AMDGPUSwLowerLDSLegacy::ID = 0;
char &llvm::AMDGPUSwLowerLDSLegacyPassID = AMDGPUSwLowerLDSLegacy::ID;

// Register the legacy pass under the name "amdgpu-sw-lower-lds".
INITIALIZE_PASS_BEGIN(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
                      "AMDGPU Software lowering of LDS", false, false)
INITIALIZE_PASS_END(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
                    "AMDGPU Software lowering of LDS", false, false)
1329
1330bool AMDGPUSwLowerLDSLegacy::runOnModule(Module &M) {
1331 // AddressSanitizer pass adds "nosanitize_address" module flag if it has
1332 // instrumented the IR. Return early if the flag is not present.
1333 if (!M.getModuleFlag("nosanitize_address"))
1334 return false;
1335 DominatorTreeWrapperPass *const DTW =
1336 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1337 auto DTCallback = [&DTW](Function &F) -> DominatorTree * {
1338 return DTW ? &DTW->getDomTree() : nullptr;
1339 };
1340 if (!AMDGPUTM) {
1341 auto &TPC = getAnalysis<TargetPassConfig>();
1342 AMDGPUTM = &TPC.getTM<AMDGPUTargetMachine>();
1343 }
1344 AMDGPUSwLowerLDS SwLowerLDSImpl(M, *AMDGPUTM, DTCallback);
1345 bool IsChanged = SwLowerLDSImpl.run();
1346 return IsChanged;
1347}
1348
// Factory for the legacy pass; the target machine argument may be null, in
// which case it is resolved from TargetPassConfig when the pass runs.
ModulePass *
  return new AMDGPUSwLowerLDSLegacy(TM);
}
1353
  // AddressSanitizer pass adds "nosanitize_address" module flag if it has
  // instrumented the IR. Return early if the flag is not present.
  if (!M.getModuleFlag("nosanitize_address"))
    return PreservedAnalyses::all();
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  // Hand out per-function dominator trees to the implementation on demand.
  auto DTCallback = [&FAM](Function &F) -> DominatorTree * {
    return &FAM.getResult<DominatorTreeAnalysis>(F);
  };
  AMDGPUSwLowerLDS SwLowerLDSImpl(M, TM, DTCallback);
  bool IsChanged = SwLowerLDSImpl.run();
  // Nothing changed: every analysis remains valid.
  if (!IsChanged)
    return PreservedAnalyses::all();

  return PA;
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
The AMDGPU TargetMachine interface definition for hw codegen targets.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
Hexagon Common GEP
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
FunctionAnalysisManager FAM
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file implements a set that has insertion order iteration characteristics.
static Split data
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Target-Independent Code Generator Pass Configuration Options pass.
static DebugLoc getOrCreateDebugLoc(const Instruction *InsertBefore, DISubprogram *SP)
This class represents a conversion between pointers from one address space to another.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
void setVolatile(bool V)
Specify whether this is a volatile cmpxchg.
an instruction that atomically reads a memory location, combines it with another value,...
void setVolatile(bool V)
Specify whether this is a volatile RMW or not.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
A node in the call graph for a module.
Definition CallGraph.h:162
Function * getFunction() const
Returns the function that this call graph node represents.
Definition CallGraph.h:193
The basic data container for the call graph of a Module of IR.
Definition CallGraph.h:72
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:537
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1445
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Subprogram description. Uses SubclassData1.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:316
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
arg_iterator arg_end()
Definition Function.h:877
arg_iterator arg_begin()
Definition Function.h:868
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
LLVM_ABI void setSanitizerMetadata(SanitizerMetadata Meta)
Definition Globals.cpp:260
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
Type * getValueType() const
uint64_t getAlignment() const
FIXME: Remove this function once transition to Align is over.
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:538
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
Definition IRBuilder.h:497
AtomicCmpXchgInst * CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID=SyncScope::System)
Definition IRBuilder.h:1939
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:564
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1905
CondBrInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
Definition IRBuilder.h:1224
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2205
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:579
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:584
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition IRBuilder.h:1986
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1481
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition IRBuilder.h:527
UncondBrInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition IRBuilder.h:1218
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > OverloadTypes, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using OverloadTypes.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2507
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2342
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1888
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2088
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
Definition IRBuilder.h:1195
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1901
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1430
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2200
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2521
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Definition IRBuilder.h:1952
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Definition IRBuilder.h:622
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:617
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1924
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1600
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:569
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2215
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1464
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2822
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LLVM_ABI MDNode * createRange(const APInt &Lo, const APInt &Hi)
Return metadata describing the range [Lo, Hi).
Definition MDBuilder.cpp:96
Metadata node.
Definition Metadata.h:1080
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
Root of the metadata hierarchy.
Definition Metadata.h:64
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
Definition Pass.h:255
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
A container for an operand bundle being viewed as a set of values rather than a set of uses.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
iterator end()
Get an iterator to the end of the SetVector.
Definition SetVector.h:112
void clear()
Completely clear the SetVector.
Definition SetVector.h:267
bool empty() const
Determine if the SetVector is empty or not.
Definition SetVector.h:100
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:106
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Class to represent struct types.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition Type.cpp:689
const Triple & getTargetTriple() const
Target-Independent Code Generator Pass Configuration Options.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
bool use_empty() const
Definition Value.h:346
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:557
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
An efficient, type-erasing, non-owning reference to a callable.
A raw_ostream that writes to an std::string.
A raw_ostream that writes to an SmallVector or SmallString.
StringRef str() const
Return a StringRef for the vector contents.
Changed
@ LOCAL_ADDRESS
Address space for local memory.
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
bool isDynamicLDS(const GlobalVariable &GV)
unsigned getAMDHSACodeObjectVersion(const Module &M)
void removeFnAttrFromReachable(CallGraph &CG, Function *KernelRoot, ArrayRef< StringRef > FnAttrs)
Strip FnAttr attribute from any functions where we may have introduced its use.
LLVM_READNONE constexpr bool isKernel(CallingConv::ID CC)
LDSUsesInfoTy getTransitiveUsesOfLDS(const CallGraph &CG, Module &M)
DenseMap< Function *, DenseSet< GlobalVariable * > > FunctionVariableMap
bool isLDSVariableToLower(const GlobalVariable &GV)
bool eliminateConstantExprUsesOfLDSFromAllInstructions(Module &M)
Align getAlign(const DataLayout &DL, const GlobalVariable *GV)
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1765
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
ModulePass * createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
char & AMDGPUSwLowerLDSLegacyPassID
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
const AMDGPUTargetMachine & TM
Definition AMDGPU.h:326
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
FunctionVariableMap direct_access
FunctionVariableMap indirect_access
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39