LLVM 17.0.0git
AMDGPUPromoteAlloca.cpp
1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Eliminates allocas by either converting them into vectors or by migrating
10// them to local address space.
11//
12// Two passes are exposed by this file:
13// - "promote-alloca-to-vector", which runs early in the pipeline and only
14// promotes to vector. Promotion to vector is almost always profitable
15// except when the alloca is too big and the promotion would result in
16// very high register pressure.
17// - "promote-alloca", which does both promotion to vector and LDS and runs
18// much later in the pipeline. This runs after SROA because promoting to
19// LDS is of course less profitable than getting rid of the alloca or
20// vectorizing it, so we only want to do it when the only alternative is
21// lowering the alloca to the stack.
22//
23// Note that both of them exist for the old and new PMs. The new PM passes are
24// declared in AMDGPU.h and the legacy PM ones are declared here.
25//
26//===----------------------------------------------------------------------===//
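//
// Illustrative sketch of the two transformations described above (editor's
// note, not taken from the upstream sources). Given element-wise accesses to
// a small private array, e.g.
//
//   %stack = alloca [4 x float], align 4
//   %gep   = getelementptr [4 x float], ptr %stack, i32 0, i32 %i
//   store float %v, ptr %gep
//
// promotion to vector rewrites each access into a whole-vector load/store of
// a <4 x float> plus insertelement/extractelement at index %i, which later
// passes can keep entirely in VGPRs. Promotion to LDS instead replaces the
// alloca with a GEP into a per-workgroup addrspace(3) array holding one copy
// of the alloca's type per workitem, indexed by the linearized workitem ID.
//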
27
28#include "AMDGPU.h"
29#include "GCNSubtarget.h"
30#include "Utils/AMDGPUBaseInfo.h"
31#include "llvm/Analysis/CaptureTracking.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/CodeGen/TargetPassConfig.h"
34#include "llvm/IR/IRBuilder.h"
35#include "llvm/IR/IntrinsicInst.h"
36#include "llvm/IR/IntrinsicsAMDGPU.h"
37#include "llvm/IR/IntrinsicsR600.h"
38#include "llvm/IR/PatternMatch.h"
39#include "llvm/Pass.h"
40#include "llvm/Target/TargetMachine.h"
41
42#define DEBUG_TYPE "amdgpu-promote-alloca"
43
44using namespace llvm;
45
46namespace {
47
48static cl::opt<bool> DisablePromoteAllocaToVector(
49 "disable-promote-alloca-to-vector",
50 cl::desc("Disable promote alloca to vector"),
51 cl::init(false));
52
53static cl::opt<bool> DisablePromoteAllocaToLDS(
54 "disable-promote-alloca-to-lds",
55 cl::desc("Disable promote alloca to LDS"),
56 cl::init(false));
57
58static cl::opt<unsigned> PromoteAllocaToVectorLimit(
59 "amdgpu-promote-alloca-to-vector-limit",
60 cl::desc("Maximum byte size to consider promote alloca to vector"),
61 cl::init(0));
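
// All three options above are ordinary cl::opt flags, so they can be toggled
// from the command line, e.g. "-disable-promote-alloca-to-vector" or
// "-amdgpu-promote-alloca-to-vector-limit=64" (a size in bytes; 0 keeps the
// register-budget heuristic used in tryPromoteAllocaToVector below).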
62
63// Shared implementation which can do both promotion to vector and to LDS.
64class AMDGPUPromoteAllocaImpl {
65private:
66 const TargetMachine &TM;
67 Module *Mod = nullptr;
68 const DataLayout *DL = nullptr;
69
70 // FIXME: This should be per-kernel.
71 uint32_t LocalMemLimit = 0;
72 uint32_t CurrentLocalMemUsage = 0;
73 unsigned MaxVGPRs;
74
75 bool IsAMDGCN = false;
76 bool IsAMDHSA = false;
77
78 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
79 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
80
81 /// BaseAlloca is the alloca root the search started from.
82 /// Val may be that alloca or a recursive user of it.
83 bool collectUsesWithPtrTypes(Value *BaseAlloca,
84 Value *Val,
85 std::vector<Value*> &WorkList) const;
86
87 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
88 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
89 /// Returns true if both operands are derived from the same alloca. Val should
90 /// be the same value as one of the input operands of UseInst.
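  /// For example, at the call sites below a select is queried with operand
  /// indices 1 and 2, and an icmp with indices 0 and 1.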
91 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
92 Instruction *UseInst,
93 int OpIdx0, int OpIdx1) const;
94
95 /// Check whether we have enough local memory for promotion.
96 bool hasSufficientLocalMem(const Function &F);
97
98 bool tryPromoteAllocaToVector(AllocaInst &I);
99 bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);
100
101public:
102 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {
103 const Triple &TT = TM.getTargetTriple();
104 IsAMDGCN = TT.getArch() == Triple::amdgcn;
105 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
106 }
107
108 bool run(Function &F, bool PromoteToLDS);
109};
110
111// FIXME: This can create globals so should be a module pass.
112class AMDGPUPromoteAlloca : public FunctionPass {
113public:
114 static char ID;
115
116 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
117
118 bool runOnFunction(Function &F) override {
119 if (skipFunction(F))
120 return false;
121 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
122 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
123 .run(F, /*PromoteToLDS*/ true);
124 return false;
125 }
126
127 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
128
129 void getAnalysisUsage(AnalysisUsage &AU) const override {
130 AU.setPreservesCFG();
131 FunctionPass::getAnalysisUsage(AU);
132 }
133};
134
135class AMDGPUPromoteAllocaToVector : public FunctionPass {
136public:
137 static char ID;
138
139 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
140
141 bool runOnFunction(Function &F) override {
142 if (skipFunction(F))
143 return false;
144 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
145 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
146 .run(F, /*PromoteToLDS*/ false);
147 return false;
148 }
149
150 StringRef getPassName() const override {
151 return "AMDGPU Promote Alloca to vector";
152 }
153
154 void getAnalysisUsage(AnalysisUsage &AU) const override {
155 AU.setPreservesCFG();
156 FunctionPass::getAnalysisUsage(AU);
157 }
158};
159
160unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
161 if (!TM.getTargetTriple().isAMDGCN())
162 return 128;
163
164 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
165 unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
166
167 // A non-entry function has only 32 caller-preserved registers.
168 // Do not promote an alloca that would force spilling unless we know the
169 // function will be inlined.
170 if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
171 !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
172 MaxVGPRs = std::min(MaxVGPRs, 32u);
173 return MaxVGPRs;
174}
175
176} // end anonymous namespace
177
178char AMDGPUPromoteAlloca::ID = 0;
179char AMDGPUPromoteAllocaToVector::ID = 0;
180
181INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
182 "AMDGPU promote alloca to vector or LDS", false, false)
183// Move LDS uses from functions to kernels before promote-alloca so that the
184// amount of available LDS can be estimated accurately.
185INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
186INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
187 "AMDGPU promote alloca to vector or LDS", false, false)
188
189INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
190 "AMDGPU promote alloca to vector", false, false)
191
192char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
193char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
194
195PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
196 FunctionAnalysisManager &AM) {
197 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ true);
198 if (Changed) {
199 PreservedAnalyses PA;
200 PA.preserveSet<CFGAnalyses>();
201 return PA;
202 }
203 return PreservedAnalyses::all();
204}
205
206PreservedAnalyses
207AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
208 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ false);
209 if (Changed) {
210 PreservedAnalyses PA;
211 PA.preserveSet<CFGAnalyses>();
212 return PA;
213 }
214 return PreservedAnalyses::all();
215}
216
217FunctionPass *llvm::createAMDGPUPromoteAlloca() {
218 return new AMDGPUPromoteAlloca();
219}
220
221FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
222 return new AMDGPUPromoteAllocaToVector();
223}
224
225bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
226 Mod = F.getParent();
227 DL = &Mod->getDataLayout();
228
229 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
230 if (!ST.isPromoteAllocaEnabled())
231 return false;
232
233 MaxVGPRs = getMaxVGPRs(TM, F);
234
235 bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;
236
237 SmallVector<AllocaInst *, 16> Allocas;
236
238 for (Instruction &I : F.getEntryBlock()) {
239 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
240 // Array allocations are probably not worth handling, since an allocation
241 // of the array type is the canonical form.
242 if (!AI->isStaticAlloca() || AI->isArrayAllocation())
243 continue;
244 Allocas.push_back(AI);
245 }
246 }
247
248 bool Changed = false;
249 for (AllocaInst *AI : Allocas) {
250 if (tryPromoteAllocaToVector(*AI))
251 Changed = true;
252 else if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
253 Changed = true;
254 }
255
256 return Changed;
257}
258
259struct MemTransferInfo {
260 ConstantInt *SrcIndex = nullptr;
261 ConstantInt *DestIndex = nullptr;
262};
263
264// Checks if the instruction I is a memset user of the alloca AI that we can
265// deal with. Currently, only non-volatile memsets that affect the whole alloca
266// are handled.
267static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
268 const DataLayout &DL) {
269 using namespace PatternMatch;
270 // For now we only care about non-volatile memsets that affect the whole type
271 // (start at index 0 and fill the whole alloca).
272 const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
273 return I->getOperand(0) == AI &&
274 match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
275}
276
277static Value *
278calculateVectorIndex(Value *Ptr,
279 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
280 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
281 if (!GEP)
282 return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
283
284 auto I = GEPIdx.find(GEP);
285 assert(I != GEPIdx.end() && "Must have entry for GEP!");
286 return I->second;
287}
288
289static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
290 Type *VecElemTy, const DataLayout &DL) {
291 // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
292 // helper.
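  // For example, "getelementptr [4 x float], ptr %alloca, i32 0, i32 %i" has a
  // single variable offset %i with scale 4 == sizeof(float), so %i itself is
  // the vector index; a purely constant byte offset is instead divided by the
  // element size.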
293 unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
294 MapVector<Value *, APInt> VarOffsets;
295 APInt ConstOffset(BW, 0);
296 if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
297 !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
298 return nullptr;
299
300 unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
301 if (VarOffsets.size() > 1)
302 return nullptr;
303
304 if (VarOffsets.size() == 1) {
305 // Only handle cases where we don't need to insert extra arithmetic
306 // instructions.
307 const auto &VarOffset = VarOffsets.front();
308 if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
309 return nullptr;
310 return VarOffset.first;
311 }
312
313 APInt Quot;
314 uint64_t Rem;
315 APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
316 if (Rem != 0)
317 return nullptr;
318
319 return ConstantInt::get(GEP->getContext(), Quot);
320}
321
322// FIXME: Should try to pick the most likely to be profitable allocas first.
323bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
324 LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
325
326 if (DisablePromoteAllocaToVector) {
327 LLVM_DEBUG(dbgs() << " Promote alloca to vector is disabled\n");
328 return false;
329 }
330
331 Type *AllocaTy = Alloca.getAllocatedType();
332 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
333 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
334 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
335 ArrayTy->getNumElements() > 0)
336 VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
337 ArrayTy->getNumElements());
338 }
339
340 // Use up to 1/4 of available register budget for vectorization.
341 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
342 : (MaxVGPRs * 32);
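  // For example, with MaxVGPRs == 256 the limit works out to 8192 bits, so the
  // check below rejects any alloca larger than 256 bytes, i.e. a quarter of
  // the 1024-byte budget of 256 VGPRs * 4 bytes each.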
343
344 if (DL->getTypeSizeInBits(AllocaTy) * 4 > Limit) {
345 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with " << MaxVGPRs
346 << " registers available\n");
347 return false;
348 }
349
350 // FIXME: There is no reason why we can't support larger arrays, we
351 // are just being conservative for now.
352 // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
353 // equivalent. Potentially these could also be promoted, but we don't
354 // currently handle this case.
355 if (!VectorTy) {
356 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
357 return false;
358 }
359
360 if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
361 LLVM_DEBUG(dbgs() << " " << *VectorTy
362 << " has an unsupported number of elements\n");
363 return false;
364 }
365
366 std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
367 SmallVector<Instruction *> WorkList;
368 SmallVector<Instruction *> DeferredInsts;
369 SmallVector<Use *, 8> Uses;
370 DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;
371
372 const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
373 LLVM_DEBUG(dbgs() << " Cannot promote alloca to vector: " << Msg << "\n"
374 << " " << *Inst << "\n");
375 return false;
376 };
377
378 for (Use &U : Alloca.uses())
379 Uses.push_back(&U);
380
381 LLVM_DEBUG(dbgs() << " Attempting promotion to: " << *VectorTy << "\n");
382
383 Type *VecEltTy = VectorTy->getElementType();
384 unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
385 while (!Uses.empty()) {
386 Use *U = Uses.pop_back_val();
387 Instruction *Inst = cast<Instruction>(U->getUser());
388
389 if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
390 // This is a store of the pointer, not to the pointer.
391 if (isa<StoreInst>(Inst) &&
392 U->getOperandNo() != StoreInst::getPointerOperandIndex())
393 return RejectUser(Inst, "pointer is being stored");
394
395 Type *AccessTy = getLoadStoreType(Inst);
396 Ptr = Ptr->stripPointerCasts();
397
398 // Alloca already accessed as vector, leave alone.
399 if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
400 DL->getTypeStoreSize(AccessTy))
401 continue;
402
403 // Check that this is a simple access of a vector element.
404 bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
405 : cast<StoreInst>(Inst)->isSimple();
406 if (!IsSimple ||
407 !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, *DL))
408 return RejectUser(Inst, "not simple and/or vector element type not "
409 "castable to access type");
410
411 WorkList.push_back(Inst);
412 continue;
413 }
414
415 if (isa<BitCastInst>(Inst)) {
416 // Look through bitcasts.
417 for (Use &U : Inst->uses())
418 Uses.push_back(&U);
419 continue;
420 }
421
422 if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
423 // If we can't compute a vector index from this GEP, then we can't
424 // promote this alloca to vector.
425 Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
426 if (!Index)
427 return RejectUser(Inst, "cannot compute vector index for GEP");
428
429 GEPVectorIdx[GEP] = Index;
430 for (Use &U : Inst->uses())
431 Uses.push_back(&U);
432 continue;
433 }
434
435 if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
436 MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
437 WorkList.push_back(Inst);
438 continue;
439 }
440
441 if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
442 if (TransferInst->isVolatile())
443 return RejectUser(Inst, "mem transfer inst is volatile");
444
445 ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
446 if (!Len || (Len->getZExtValue() % ElementSize))
447 return RejectUser(Inst, "mem transfer inst length is non-constant or "
448 "not a multiple of the vector element size");
449
450 if (!TransferInfo.count(TransferInst)) {
451 DeferredInsts.push_back(Inst);
452 WorkList.push_back(Inst);
453 TransferInfo[TransferInst] = MemTransferInfo();
454 }
455
456 auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
457 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
458 if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
459 return nullptr;
460
461 return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
462 };
463
464 unsigned OpNum = U->getOperandNo();
465 MemTransferInfo *TI = &TransferInfo[TransferInst];
466 if (OpNum == 0) {
467 Value *Dest = TransferInst->getDest();
468 ConstantInt *Index = getPointerIndexOfAlloca(Dest);
469 if (!Index)
470 return RejectUser(Inst, "could not calculate constant dest index");
471 TI->DestIndex = Index;
472 } else {
473 assert(OpNum == 1);
474 Value *Src = TransferInst->getSource();
475 ConstantInt *Index = getPointerIndexOfAlloca(Src);
476 if (!Index)
477 return RejectUser(Inst, "could not calculate constant src index");
478 TI->SrcIndex = Index;
479 }
480 continue;
481 }
482
483 // Ignore assume-like intrinsics and comparisons used in assumes.
484 if (isAssumeLikeIntrinsic(Inst))
485 continue;
486
487 if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
488 return isAssumeLikeIntrinsic(cast<Instruction>(U));
489 }))
490 continue;
491
492 return RejectUser(Inst, "unhandled alloca user");
493 }
494
495 while (!DeferredInsts.empty()) {
496 Instruction *Inst = DeferredInsts.pop_back_val();
497 MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
498 // TODO: Support the case if the pointers are from different alloca or
499 // from different address spaces.
500 MemTransferInfo &Info = TransferInfo[TransferInst];
501 if (!Info.SrcIndex || !Info.DestIndex)
502 return RejectUser(
503 Inst, "mem transfer inst is missing constant src and/or dst index");
504 }
505
506 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
507 << *VectorTy << '\n');
508
509 for (Instruction *Inst : WorkList) {
510 IRBuilder<> Builder(Inst);
511 switch (Inst->getOpcode()) {
512 case Instruction::Load: {
513 Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
514 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
515 Type *VecPtrTy = VectorTy->getPointerTo(Alloca.getAddressSpace());
516 Value *BitCast = Builder.CreateBitCast(&Alloca, VecPtrTy);
517 Value *VecValue =
518 Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca.getAlign());
519 Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
520 if (Inst->getType() != VecEltTy)
521 ExtractElement =
522 Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
523 Inst->replaceAllUsesWith(ExtractElement);
524 Inst->eraseFromParent();
525 break;
526 }
527 case Instruction::Store: {
528 StoreInst *SI = cast<StoreInst>(Inst);
529 Value *Ptr = SI->getPointerOperand();
530 Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
531 Type *VecPtrTy = VectorTy->getPointerTo(Alloca.getAddressSpace());
532 Value *BitCast = Builder.CreateBitCast(&Alloca, VecPtrTy);
533 Value *VecValue =
534 Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca.getAlign());
535 Value *Elt = SI->getValueOperand();
536 if (Elt->getType() != VecEltTy)
537 Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
538 Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
539 Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca.getAlign());
540 Inst->eraseFromParent();
541 break;
542 }
543 case Instruction::Call: {
544 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst)) {
545 ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
546 unsigned NumCopied = Length->getZExtValue() / ElementSize;
547 MemTransferInfo *TI = &TransferInfo[cast<MemTransferInst>(Inst)];
548 unsigned SrcBegin = TI->SrcIndex->getZExtValue();
549 unsigned DestBegin = TI->DestIndex->getZExtValue();
550
551 SmallVector<int> Mask;
552 for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
553 if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
554 Mask.push_back(SrcBegin++);
555 } else {
556 Mask.push_back(Idx);
557 }
558 }
559 Type *VecPtrTy = VectorTy->getPointerTo(Alloca.getAddressSpace());
560 Value *BitCast = Builder.CreateBitCast(&Alloca, VecPtrTy);
561 Value *VecValue =
562 Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca.getAlign());
563 Value *NewVecValue = Builder.CreateShuffleVector(VecValue, Mask);
564 Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca.getAlign());
565
566 Inst->eraseFromParent();
567 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
568 // Ensure the length parameter of the memsets matches the new vector
569 // type's. In general, the type size shouldn't change so this is a
570 // no-op, but it's better to be safe.
571 MSI->setOperand(2, Builder.getInt64(DL->getTypeStoreSize(VectorTy)));
572 } else {
573 llvm_unreachable("Unsupported call when promoting alloca to vector");
574 }
575 break;
576 }
577
578 default:
579 llvm_unreachable("Inconsistency in instructions promotable to vector");
580 }
581 }
582
583 return true;
584}
585
586std::pair<Value *, Value *>
587AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
588 Function &F = *Builder.GetInsertBlock()->getParent();
589 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
590
591 if (!IsAMDHSA) {
592 Function *LocalSizeYFn =
593 Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
594 Function *LocalSizeZFn =
595 Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
596
597 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
598 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
599
600 ST.makeLIDRangeMetadata(LocalSizeY);
601 ST.makeLIDRangeMetadata(LocalSizeZ);
602
603 return std::pair(LocalSizeY, LocalSizeZ);
604 }
605
606 // We must read the size out of the dispatch pointer.
607 assert(IsAMDGCN);
608
609 // We are indexing into this struct, and want to extract the workgroup_size_*
610 // fields.
611 //
612 // typedef struct hsa_kernel_dispatch_packet_s {
613 // uint16_t header;
614 // uint16_t setup;
615 // uint16_t workgroup_size_x ;
616 // uint16_t workgroup_size_y;
617 // uint16_t workgroup_size_z;
618 // uint16_t reserved0;
619 // uint32_t grid_size_x ;
620 // uint32_t grid_size_y ;
621 // uint32_t grid_size_z;
622 //
623 // uint32_t private_segment_size;
624 // uint32_t group_segment_size;
625 // uint64_t kernel_object;
626 //
627 // #ifdef HSA_LARGE_MODEL
628 // void *kernarg_address;
629 // #elif defined HSA_LITTLE_ENDIAN
630 // void *kernarg_address;
631 // uint32_t reserved1;
632 // #else
633 // uint32_t reserved1;
634 // void *kernarg_address;
635 // #endif
636 // uint64_t reserved2;
637 // hsa_signal_t completion_signal; // uint64_t wrapper
638 // } hsa_kernel_dispatch_packet_t
639 //
640 Function *DispatchPtrFn =
641 Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
642
643 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
644 DispatchPtr->addRetAttr(Attribute::NoAlias);
645 DispatchPtr->addRetAttr(Attribute::NonNull);
646 F.removeFnAttr("amdgpu-no-dispatch-ptr");
647
648 // Size of the dispatch packet struct.
649 DispatchPtr->addDereferenceableRetAttr(64);
650
651 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
652 Value *CastDispatchPtr = Builder.CreateBitCast(
653 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
654
655 // We could do a single 64-bit load here, but it's likely that the basic
656 // 32-bit and extract sequence is already present, and it is probably easier
657 // to CSE this. The loads should be mergeable later anyway.
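  // The i32 at index 1 (byte offset 4) packs workgroup_size_x in its low 16
  // bits and workgroup_size_y in its high 16 bits; the i32 at index 2 holds
  // workgroup_size_z in its low 16 bits with reserved0 above it.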
658 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
659 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
660
661 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
662 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
663
664 MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
665 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
666 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
667 ST.makeLIDRangeMetadata(LoadZU);
668
669 // Extract y component. Upper half of LoadZU should be zero already.
670 Value *Y = Builder.CreateLShr(LoadXY, 16);
671
672 return std::pair(Y, LoadZU);
673}
674
675Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
676 unsigned N) {
677 Function *F = Builder.GetInsertBlock()->getParent();
678 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
679 Intrinsic::ID IntrID = Intrinsic::ID::not_intrinsic;
680 StringRef AttrName;
681
682 switch (N) {
683 case 0:
684 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
685 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
686 AttrName = "amdgpu-no-workitem-id-x";
687 break;
688 case 1:
689 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
690 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
691 AttrName = "amdgpu-no-workitem-id-y";
692 break;
693
694 case 2:
695 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
696 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
697 AttrName = "amdgpu-no-workitem-id-z";
698 break;
699 default:
700 llvm_unreachable("invalid dimension");
701 }
702
703 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
704 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
705 ST.makeLIDRangeMetadata(CI);
706 F->removeFnAttr(AttrName);
707
708 return CI;
709}
710
711static bool isCallPromotable(CallInst *CI) {
712 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
713 if (!II)
714 return false;
715
716 switch (II->getIntrinsicID()) {
717 case Intrinsic::memcpy:
718 case Intrinsic::memmove:
719 case Intrinsic::memset:
720 case Intrinsic::lifetime_start:
721 case Intrinsic::lifetime_end:
722 case Intrinsic::invariant_start:
723 case Intrinsic::invariant_end:
724 case Intrinsic::launder_invariant_group:
725 case Intrinsic::strip_invariant_group:
726 case Intrinsic::objectsize:
727 return true;
728 default:
729 return false;
730 }
731}
732
733bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
734 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
735 int OpIdx1) const {
736 // Figure out which operand is the one we might not be promoting.
737 Value *OtherOp = Inst->getOperand(OpIdx0);
738 if (Val == OtherOp)
739 OtherOp = Inst->getOperand(OpIdx1);
740
741 if (isa<ConstantPointerNull>(OtherOp))
742 return true;
743
744 Value *OtherObj = getUnderlyingObject(OtherOp);
745 if (!isa<AllocaInst>(OtherObj))
746 return false;
747
748 // TODO: We should be able to replace undefs with the right pointer type.
749
750 // TODO: If we know the other base object is another promotable
751 // alloca, not necessarily this alloca, we can do this. The
752 // important part is both must have the same address space at
753 // the end.
754 if (OtherObj != BaseAlloca) {
755 LLVM_DEBUG(
756 dbgs() << "Found a binary instruction with another alloca object\n");
757 return false;
758 }
759
760 return true;
761}
762
763bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
764 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
765
766 for (User *User : Val->users()) {
767 if (is_contained(WorkList, User))
768 continue;
769
770 if (CallInst *CI = dyn_cast<CallInst>(User)) {
771 if (!isCallPromotable(CI))
772 return false;
773
774 WorkList.push_back(User);
775 continue;
776 }
777
778 Instruction *UseInst = cast<Instruction>(User);
779 if (UseInst->getOpcode() == Instruction::PtrToInt)
780 return false;
781
782 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
783 if (LI->isVolatile())
784 return false;
785
786 continue;
787 }
788
789 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
790 if (SI->isVolatile())
791 return false;
792
793 // Reject if the stored value is not the pointer operand.
794 if (SI->getPointerOperand() != Val)
795 return false;
796 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
797 if (RMW->isVolatile())
798 return false;
799 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
800 if (CAS->isVolatile())
801 return false;
802 }
803
804 // Only promote a select if we know that the other select operand
805 // is from another pointer that will also be promoted.
806 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
807 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
808 return false;
809
810 // May need to rewrite constant operands.
811 WorkList.push_back(ICmp);
812 }
813
814 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
815 // Give up if the pointer may be captured.
816 if (PointerMayBeCaptured(UseInst, true, true))
817 return false;
818 // Don't collect the users of this.
819 WorkList.push_back(User);
820 continue;
821 }
822
823 // Do not promote vector/aggregate type instructions. It is hard to track
824 // their users.
825 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
826 return false;
827
828 if (!User->getType()->isPointerTy())
829 continue;
830
831 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
832 // Be conservative if an address could be computed outside the bounds of
833 // the alloca.
834 if (!GEP->isInBounds())
835 return false;
836 }
837
838 // Only promote a select if we know that the other select operand is from
839 // another pointer that will also be promoted.
840 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
841 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
842 return false;
843 }
844
845 // Repeat for phis.
846 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
847 // TODO: Handle more complex cases. We should be able to replace loops
848 // over arrays.
849 switch (Phi->getNumIncomingValues()) {
850 case 1:
851 break;
852 case 2:
853 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
854 return false;
855 break;
856 default:
857 return false;
858 }
859 }
860
861 WorkList.push_back(User);
862 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
863 return false;
864 }
865
866 return true;
867}
868
869bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
870
871 FunctionType *FTy = F.getFunctionType();
872 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
873
874 // If the function has any arguments in the local address space, then it's
875 // possible these arguments require the entire local memory space, so
876 // we cannot use local memory in the pass.
877 for (Type *ParamTy : FTy->params()) {
878 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
879 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
880 LocalMemLimit = 0;
881 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
882 "local memory disabled.\n");
883 return false;
884 }
885 }
886
887 LocalMemLimit = ST.getAddressableLocalMemorySize();
888 if (LocalMemLimit == 0)
889 return false;
890
891 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
892 SmallPtrSet<const Constant *, 8> VisitedConstants;
893 SmallVector<const Constant *, 8> Stack;
894
895 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
896 for (const User *U : Val->users()) {
897 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
898 if (Use->getParent()->getParent() == &F)
899 return true;
900 } else {
901 const Constant *C = cast<Constant>(U);
902 if (VisitedConstants.insert(C).second)
903 Stack.push_back(C);
904 }
905 }
906
907 return false;
908 };
909
910 for (GlobalVariable &GV : Mod->globals()) {
911 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
912 continue;
913
914 if (visitUsers(&GV, &GV)) {
915 UsedLDS.insert(&GV);
916 Stack.clear();
917 continue;
918 }
919
920 // For any ConstantExpr uses, we need to recursively search the users until
921 // we see a function.
922 while (!Stack.empty()) {
923 const Constant *C = Stack.pop_back_val();
924 if (visitUsers(&GV, C)) {
925 UsedLDS.insert(&GV);
926 Stack.clear();
927 break;
928 }
929 }
930 }
931
932 const DataLayout &DL = Mod->getDataLayout();
933 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
934 AllocatedSizes.reserve(UsedLDS.size());
935
936 for (const GlobalVariable *GV : UsedLDS) {
937 Align Alignment =
938 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
939 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
940
941 // HIP uses an extern unsized array in local address space for dynamically
942 // allocated shared memory. In that case, we have to disable the promotion.
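    // (In HIP source this is typically written "extern __shared__ float
    // dynLDS[];", which lowers to a zero-sized external addrspace(3) array.)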
943 if (GV->hasExternalLinkage() && AllocSize == 0) {
944 LocalMemLimit = 0;
945 LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
946 "local memory. Promoting to local memory "
947 "disabled.\n");
948 return false;
949 }
950
951 AllocatedSizes.emplace_back(AllocSize, Alignment);
952 }
953
954 // Sort to try to estimate the worst case alignment padding
955 //
956 // FIXME: We should really do something to fix the addresses to a more optimal
957 // value instead
958 llvm::sort(AllocatedSizes, llvm::less_second());
959
960 // Check how much local memory is being used by global objects
961 CurrentLocalMemUsage = 0;
962
963 // FIXME: Try to account for padding here. The real padding and address is
964 // currently determined from the inverse order of uses in the function when
965 // legalizing, which could also potentially change. We try to estimate the
966 // worst case here, but we probably should fix the addresses earlier.
967 for (auto Alloc : AllocatedSizes) {
968 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
969 CurrentLocalMemUsage += Alloc.first;
970 }
971
972 unsigned MaxOccupancy =
973 ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);
974
975 // Restrict local memory usage so that we don't drastically reduce occupancy,
976 // unless it is already significantly reduced.
977
978 // TODO: Have some sort of hint or other heuristics to guess occupancy based
979 // on other factors.
980 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
981 if (OccupancyHint == 0)
982 OccupancyHint = 7;
983
984 // Clamp to max value.
985 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
986
987 // Check the hint but ignore it if it's obviously wrong from the existing LDS
988 // usage.
989 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
990
991 // Round up to the next tier of usage.
992 unsigned MaxSizeWithWaveCount =
993 ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
994
995 // Program is possibly broken by using more local mem than available.
996 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
997 return false;
998
999 LocalMemLimit = MaxSizeWithWaveCount;
1000
1001 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
1002 << " bytes of LDS\n"
1003 << " Rounding size to " << MaxSizeWithWaveCount
1004 << " with a maximum occupancy of " << MaxOccupancy << '\n'
1005 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
1006 << " available for promotion\n");
1007
1008 return true;
1009}
1010
1011// FIXME: Should try to pick the most likely to be profitable allocas first.
1012bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
1013 bool SufficientLDS) {
1014 LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');
1015
1016 if (DisablePromoteAllocaToLDS) {
1017 LLVM_DEBUG(dbgs() << " Promote alloca to LDS is disabled\n");
1018 return false;
1019 }
1020
1021 const DataLayout &DL = Mod->getDataLayout();
1022 IRBuilder<> Builder(&I);
1023
1024 const Function &ContainingFunction = *I.getParent()->getParent();
1025 CallingConv::ID CC = ContainingFunction.getCallingConv();
1026
1027 // Don't promote the alloca to LDS for shader calling conventions as the work
1028 // item ID intrinsics are not supported for these calling conventions.
1029 // Furthermore not all LDS is available for some of the stages.
1030 switch (CC) {
1031 case CallingConv::AMDGPU_KERNEL:
1032 case CallingConv::SPIR_KERNEL:
1033 break;
1034 default:
1035 LLVM_DEBUG(
1036 dbgs()
1037 << " promote alloca to LDS not supported with calling convention.\n");
1038 return false;
1039 }
1040
1041 // Not likely to have sufficient local memory for promotion.
1042 if (!SufficientLDS)
1043 return false;
1044
1045 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
1046 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
1047
1048 Align Alignment =
1049 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
1050
1051 // FIXME: This computed padding is likely wrong since it depends on inverse
1052 // usage order.
1053 //
1054 // FIXME: It is also possible that if we're allowed to use all of the memory,
1055 // we could end up using more than the maximum due to alignment padding.
1056
1057 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
1058 uint32_t AllocSize =
1059 WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
1060 NewSize += AllocSize;
1061
1062 if (NewSize > LocalMemLimit) {
1063 LLVM_DEBUG(dbgs() << " " << AllocSize
1064 << " bytes of local memory not available to promote\n");
1065 return false;
1066 }
1067
1068 CurrentLocalMemUsage = NewSize;
1069
1070 std::vector<Value*> WorkList;
1071
1072 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
1073 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
1074 return false;
1075 }
1076
1077 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
1078
1079 Function *F = I.getParent()->getParent();
1080
1081 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
1082 GlobalVariable *GV = new GlobalVariable(
1083 *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
1084 Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
1085 GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
1086 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1087 GV->setAlignment(I.getAlign());
1088
1089 Value *TCntY, *TCntZ;
1090
1091 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
1092 Value *TIdX = getWorkitemID(Builder, 0);
1093 Value *TIdY = getWorkitemID(Builder, 1);
1094 Value *TIdZ = getWorkitemID(Builder, 2);
1095
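  // Compute the linearized workitem ID within the workgroup:
  //   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
  // so each workitem addresses a disjoint slice of the shared LDS array.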
1096 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
1097 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
1098 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
1099 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
1100 TID = Builder.CreateAdd(TID, TIdZ);
1101
1102 Value *Indices[] = {
1103 Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
1104 TID
1105 };
1106
1107 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
1108 I.mutateType(Offset->getType());
1109 I.replaceAllUsesWith(Offset);
1110 I.eraseFromParent();
1111
1112 SmallVector<IntrinsicInst *> DeferredIntrs;
1113
1114 for (Value *V : WorkList) {
1115 CallInst *Call = dyn_cast<CallInst>(V);
1116 if (!Call) {
1117 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
1118 Value *Src0 = CI->getOperand(0);
1119 PointerType *NewTy = PointerType::getWithSamePointeeType(
1120 cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);
1121
1122 if (isa<ConstantPointerNull>(CI->getOperand(0)))
1123 CI->setOperand(0, ConstantPointerNull::get(NewTy));
1124
1125 if (isa<ConstantPointerNull>(CI->getOperand(1)))
1126 CI->setOperand(1, ConstantPointerNull::get(NewTy));
1127
1128 continue;
1129 }
1130
1131 // The operand's value should be corrected on its own and we don't want to
1132 // touch the users.
1133 if (isa<AddrSpaceCastInst>(V))
1134 continue;
1135
1136 PointerType *NewTy = PointerType::getWithSamePointeeType(
1137 cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);
1138
1139 // FIXME: It doesn't really make sense to try to do this for all
1140 // instructions.
1141 V->mutateType(NewTy);
1142
1143 // Adjust the types of any constant operands.
1144 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1145 if (isa<ConstantPointerNull>(SI->getOperand(1)))
1146 SI->setOperand(1, ConstantPointerNull::get(NewTy));
1147
1148 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1149 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1150 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1151 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1152 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1153 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1154 }
1155 }
1156
1157 continue;
1158 }
1159
1160 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1161 Builder.SetInsertPoint(Intr);
1162 switch (Intr->getIntrinsicID()) {
1163 case Intrinsic::lifetime_start:
1164 case Intrinsic::lifetime_end:
1165 // These intrinsics are for address space 0 only
1166 Intr->eraseFromParent();
1167 continue;
1168 case Intrinsic::memcpy:
1169 case Intrinsic::memmove:
1170 // These have 2 pointer operands. If the second pointer also needs to be
1171 // replaced, we defer processing of these intrinsics until all other
1172 // values have been processed.
1173 DeferredIntrs.push_back(Intr);
1174 continue;
1175 case Intrinsic::memset: {
1176 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1177 Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
1178 MemSet->getLength(), MemSet->getDestAlign(),
1179 MemSet->isVolatile());
1180 Intr->eraseFromParent();
1181 continue;
1182 }
1183 case Intrinsic::invariant_start:
1184 case Intrinsic::invariant_end:
1185 case Intrinsic::launder_invariant_group:
1186 case Intrinsic::strip_invariant_group:
1187 Intr->eraseFromParent();
1188 // FIXME: I think the invariant marker should still theoretically apply,
1189 // but the intrinsics need to be changed to accept pointers with any
1190 // address space.
1191 continue;
1192 case Intrinsic::objectsize: {
1193 Value *Src = Intr->getOperand(0);
1194 Function *ObjectSize = Intrinsic::getDeclaration(
1195 Mod, Intrinsic::objectsize,
1196 {Intr->getType(),
1197 PointerType::getWithSamePointeeType(
1198 cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});
1199
1200 CallInst *NewCall = Builder.CreateCall(
1201 ObjectSize,
1202 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1203 Intr->replaceAllUsesWith(NewCall);
1204 Intr->eraseFromParent();
1205 continue;
1206 }
1207 default:
1208 Intr->print(errs());
1209 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1210 }
1211 }
1212
1213 for (IntrinsicInst *Intr : DeferredIntrs) {
1214 Builder.SetInsertPoint(Intr);
1215 Intrinsic::ID ID = Intr->getIntrinsicID();
1216 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1217
1218 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1219 auto *B =
1220 Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
1221 MI->getRawSource(), MI->getSourceAlign(),
1222 MI->getLength(), MI->isVolatile());
1223
1224 for (unsigned I = 0; I != 2; ++I) {
1225 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1226 B->addDereferenceableParamAttr(I, Bytes);
1227 }
1228 }
1229
1230 Intr->eraseFromParent();
1231 }
1232
1233 return true;
1234}