AMDGPUPromoteAlloca.cpp
1//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Eliminates allocas by either converting them into vectors or by migrating
10// them to local address space.
11//
12// Two passes are exposed by this file:
13// - "promote-alloca-to-vector", which runs early in the pipeline and only
14// promotes to vector. Promotion to vector is almost always profitable
15// except when the alloca is too big and the promotion would result in
16// very high register pressure.
17// - "promote-alloca", which does both promotion to vector and LDS and runs
18// much later in the pipeline. This runs after SROA because promoting to
19// LDS is of course less profitable than getting rid of the alloca or
20// vectorizing it, thus we only want to do it when the only alternative is
21// lowering the alloca to stack.
22//
23// Note that both of them exist for the old and new PMs. The new PM passes are
24// declared in AMDGPU.h and the legacy PM ones are declared here.
25//
26//===----------------------------------------------------------------------===//
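//
// For quick experimentation, both passes can be exercised in isolation with
// opt, for example (illustrative invocations; the registered pass names come
// from the AMDGPU pass registry):
//   opt -passes=amdgpu-promote-alloca-to-vector -S input.ll
//   opt -passes=amdgpu-promote-alloca -S input.ll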
27
28#include "AMDGPU.h"
29#include "GCNSubtarget.h"
31#include "llvm/ADT/STLExtras.h"
37#include "llvm/IR/IRBuilder.h"
39#include "llvm/IR/IntrinsicsAMDGPU.h"
40#include "llvm/IR/IntrinsicsR600.h"
42#include "llvm/Pass.h"
45
46#define DEBUG_TYPE "amdgpu-promote-alloca"
47
48using namespace llvm;
49
50namespace {
51
52static cl::opt<bool>
53 DisablePromoteAllocaToVector("disable-promote-alloca-to-vector",
54 cl::desc("Disable promote alloca to vector"),
55 cl::init(false));
56
57static cl::opt<bool>
58 DisablePromoteAllocaToLDS("disable-promote-alloca-to-lds",
59 cl::desc("Disable promote alloca to LDS"),
60 cl::init(false));
61
62static cl::opt<unsigned> PromoteAllocaToVectorLimit(
63 "amdgpu-promote-alloca-to-vector-limit",
64 cl::desc("Maximum byte size to consider promote alloca to vector"),
65 cl::init(0));
66
67// Shared implementation which can do both promotion to vector and to LDS.
68class AMDGPUPromoteAllocaImpl {
69private:
70 const TargetMachine &TM;
71 Module *Mod = nullptr;
72 const DataLayout *DL = nullptr;
73
74 // FIXME: This should be per-kernel.
75 uint32_t LocalMemLimit = 0;
76 uint32_t CurrentLocalMemUsage = 0;
77 unsigned MaxVGPRs;
78
79 bool IsAMDGCN = false;
80 bool IsAMDHSA = false;
81
82 std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
83 Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
84
85 /// BaseAlloca is the alloca root the search started from.
86 /// Val may be that alloca or a recursive user of it.
87 bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
88 std::vector<Value *> &WorkList) const;
89
90 /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
91 /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
92 /// Returns true if both operands are derived from the same alloca. Val should
93 /// be the same value as one of the input operands of UseInst.
94 bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
95 Instruction *UseInst, int OpIdx0,
96 int OpIdx1) const;
97
98 /// Check whether we have enough local memory for promotion.
99 bool hasSufficientLocalMem(const Function &F);
100
101 bool tryPromoteAllocaToVector(AllocaInst &I);
102 bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);
103
104public:
105 AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {
106 const Triple &TT = TM.getTargetTriple();
107 IsAMDGCN = TT.getArch() == Triple::amdgcn;
108 IsAMDHSA = TT.getOS() == Triple::AMDHSA;
109 }
110
111 bool run(Function &F, bool PromoteToLDS);
112};
113
114// FIXME: This can create globals so should be a module pass.
115class AMDGPUPromoteAlloca : public FunctionPass {
116public:
117 static char ID;
118
119 AMDGPUPromoteAlloca() : FunctionPass(ID) {}
120
121 bool runOnFunction(Function &F) override {
122 if (skipFunction(F))
123 return false;
124 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
125 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
126 .run(F, /*PromoteToLDS*/ true);
127 return false;
128 }
129
130 StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
131
132 void getAnalysisUsage(AnalysisUsage &AU) const override {
133 AU.setPreservesCFG();
134 FunctionPass::getAnalysisUsage(AU);
135 }
136};
137
138class AMDGPUPromoteAllocaToVector : public FunctionPass {
139public:
140 static char ID;
141
142 AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
143
144 bool runOnFunction(Function &F) override {
145 if (skipFunction(F))
146 return false;
147 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
148 return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
149 .run(F, /*PromoteToLDS*/ false);
150 return false;
151 }
152
153 StringRef getPassName() const override {
154 return "AMDGPU Promote Alloca to vector";
155 }
156
157 void getAnalysisUsage(AnalysisUsage &AU) const override {
158 AU.setPreservesCFG();
159 FunctionPass::getAnalysisUsage(AU);
160 }
161};
162
163unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
164 if (!TM.getTargetTriple().isAMDGCN())
165 return 128;
166
167 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
168 unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
169
170 // A non-entry function has only 32 caller preserved registers.
171 // Do not promote alloca which will force spilling unless we know the function
172 // will be inlined.
173 if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
174 !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
175 MaxVGPRs = std::min(MaxVGPRs, 32u);
176 return MaxVGPRs;
177}
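// For illustration: on a typical GCN subtarget an entry function (kernel) may be
// granted the full budget reported by getMaxNumVGPRs (e.g. 256 VGPRs at minimum
// occupancy), while a callee that is neither an entry function nor marked
// alwaysinline is clamped to 32 so that promoted vectors stay within the
// caller-preserved registers. (Example numbers are hardware dependent.)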
178
179} // end anonymous namespace
180
181char AMDGPUPromoteAlloca::ID = 0;
182char AMDGPUPromoteAllocaToVector::ID = 0;
183
185 "AMDGPU promote alloca to vector or LDS", false, false)
186// Move LDS uses from functions to kernels before promote alloca for accurate
187// estimation of LDS available
188INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDSLegacy)
189INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
190 "AMDGPU promote alloca to vector or LDS", false, false)
191
192INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
193 "AMDGPU promote alloca to vector", false, false)
194
195char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
196char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;
197
198PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
199 FunctionAnalysisManager &AM) {
200 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ true);
201 if (Changed) {
202 PreservedAnalyses PA;
203 PA.preserveSet<CFGAnalyses>();
204 return PA;
205 }
206 return PreservedAnalyses::all();
207}
208
209PreservedAnalyses
210AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
211 bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ false);
212 if (Changed) {
213 PreservedAnalyses PA;
214 PA.preserveSet<CFGAnalyses>();
215 return PA;
216 }
217 return PreservedAnalyses::all();
218}
219
220FunctionPass *llvm::createAMDGPUPromoteAlloca() {
221 return new AMDGPUPromoteAlloca();
222}
223
224FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
225 return new AMDGPUPromoteAllocaToVector();
226}
227
228bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
229 Mod = F.getParent();
230 DL = &Mod->getDataLayout();
231
232 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
233 if (!ST.isPromoteAllocaEnabled())
234 return false;
235
236 MaxVGPRs = getMaxVGPRs(TM, F);
237
238 bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;
239
240 SmallVector<AllocaInst *, 16> Allocas;
241 for (Instruction &I : F.getEntryBlock()) {
242 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
243 // Array allocations are probably not worth handling, since an allocation
244 // of the array type is the canonical form.
245 if (!AI->isStaticAlloca() || AI->isArrayAllocation())
246 continue;
247 Allocas.push_back(AI);
248 }
249 }
250
251 bool Changed = false;
252 for (AllocaInst *AI : Allocas) {
253 if (tryPromoteAllocaToVector(*AI))
254 Changed = true;
255 else if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
256 Changed = true;
257 }
258
259 // NOTE: tryPromoteAllocaToVector removes the alloca, so Allocas contains
260 // dangling pointers. If we want to reuse it past this point, the loop above
261 // would need to be updated to remove successfully promoted allocas.
262
263 return Changed;
264}
265
266struct MemTransferInfo {
267 ConstantInt *SrcIndex = nullptr;
268 ConstantInt *DestIndex = nullptr;
269};
270
271// Checks if the instruction I is a memset user of the alloca AI that we can
272// deal with. Currently, only non-volatile memsets that affect the whole alloca
273// are handled.
274static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
275 const DataLayout &DL) {
276 using namespace PatternMatch;
277 // For now we only care about non-volatile memsets that affect the whole type
278 // (start at index 0 and fill the whole alloca).
279 //
280 // TODO: Now that we moved to PromoteAlloca we could handle any memsets
281 // (except maybe volatile ones?) - we just need to use shufflevector if it
282 // only affects a subset of the vector.
283 const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
284 return I->getOperand(0) == AI &&
285 match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
286}
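// Illustrative example (not taken from this file): for
//   %a = alloca [4 x i32], addrspace(5)        ; 16 bytes
// a call such as
//   call void @llvm.memset.p5.i64(ptr addrspace(5) %a, i8 0, i64 16, i1 false)
// satisfies the check above, whereas a volatile memset or one covering only the
// first 8 bytes is rejected.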
287
288static Value *
289calculateVectorIndex(Value *Ptr,
290 const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
291 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
292 if (!GEP)
293 return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));
294
295 auto I = GEPIdx.find(GEP);
296 assert(I != GEPIdx.end() && "Must have entry for GEP!");
297 return I->second;
298}
299
300static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
301 Type *VecElemTy, const DataLayout &DL) {
302 // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
303 // helper.
304 unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
305 MapVector<Value *, APInt> VarOffsets;
306 APInt ConstOffset(BW, 0);
307 if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
308 !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
309 return nullptr;
310
311 unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
312 if (VarOffsets.size() > 1)
313 return nullptr;
314
315 if (VarOffsets.size() == 1) {
316 // Only handle cases where we don't need to insert extra arithmetic
317 // instructions.
318 const auto &VarOffset = VarOffsets.front();
319 if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
320 return nullptr;
321 return VarOffset.first;
322 }
323
324 APInt Quot;
325 uint64_t Rem;
326 APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
327 if (Rem != 0)
328 return nullptr;
329
330 return ConstantInt::get(GEP->getContext(), Quot);
331}
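// Worked example (illustrative): with VecElemTy = i32 (4 bytes), a GEP computing a
// pure byte offset of 8 from the alloca maps to the constant vector index 2, and a
// GEP of the form "getelementptr i32, ptr addrspace(5) %alloca, i32 %i" maps to the
// variable index %i (scale 4 == element size, constant offset 0). A GEP mixing a
// variable index with a non-zero constant offset is rejected because extra
// arithmetic would be needed.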
332
333/// Promotes a single user of the alloca to a vector form.
334///
335/// \param Inst Instruction to be promoted.
336/// \param DL Module Data Layout.
337/// \param VectorTy Vectorized Type.
338/// \param VecStoreSize Size of \p VectorTy in bytes.
339/// \param ElementSize Size of \p VectorTy element type in bytes.
340/// \param TransferInfo MemTransferInst info map.
341/// \param GEPVectorIdx GEP -> VectorIdx cache.
342/// \param CurVal Current value of the vector (e.g. last stored value)
343/// \param[out] DeferredLoads \p Inst is added to this vector if it can't
344/// be promoted now. This happens when promoting requires \p
345/// CurVal, but \p CurVal is nullptr.
346/// \return the stored value if \p Inst would have written to the alloca, or
347/// nullptr otherwise.
348static Value *promoteAllocaUserToVector(
349 Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy,
350 unsigned VecStoreSize, unsigned ElementSize,
351 DenseMap<MemTransferInst *, MemTransferInfo> &TransferInfo,
352 std::map<GetElementPtrInst *, Value *> &GEPVectorIdx, Value *CurVal,
353 SmallVectorImpl<LoadInst *> &DeferredLoads) {
354 // Note: we use InstSimplifyFolder because it can leverage the DataLayout
355 // to do more folding, especially in the case of vector splats.
356 IRBuilder<InstSimplifyFolder> Builder(Inst->getContext(),
357 InstSimplifyFolder(DL));
358 Builder.SetInsertPoint(Inst);
359
360 const auto GetOrLoadCurrentVectorValue = [&]() -> Value * {
361 if (CurVal)
362 return CurVal;
363
364 // If the current value is not known, insert a dummy load and lower it on
365 // the second pass.
366 LoadInst *Dummy =
367 Builder.CreateLoad(VectorTy, PoisonValue::get(Builder.getPtrTy()),
368 "promotealloca.dummyload");
369 DeferredLoads.push_back(Dummy);
370 return Dummy;
371 };
372
373 const auto CreateTempPtrIntCast = [&Builder, DL](Value *Val,
374 Type *PtrTy) -> Value * {
375 assert(DL.getTypeStoreSize(Val->getType()) == DL.getTypeStoreSize(PtrTy));
376 const unsigned Size = DL.getTypeStoreSizeInBits(PtrTy);
377 if (!PtrTy->isVectorTy())
378 return Builder.CreateBitOrPointerCast(Val, Builder.getIntNTy(Size));
379 const unsigned NumPtrElts = cast<FixedVectorType>(PtrTy)->getNumElements();
380 // If we want to do such a cast, e.g. a <2 x ptr> into a <4 x i32>, we need to
381 // first cast the ptr vector to <2 x i64>.
382 assert((Size % NumPtrElts == 0) && "Vector size not divisible");
383 Type *EltTy = Builder.getIntNTy(Size / NumPtrElts);
384 return Builder.CreateBitOrPointerCast(
385 Val, FixedVectorType::get(EltTy, NumPtrElts));
386 };
387
388 Type *VecEltTy = VectorTy->getElementType();
389 const unsigned NumVecElts = VectorTy->getNumElements();
390
391 switch (Inst->getOpcode()) {
392 case Instruction::Load: {
393 // Loads can only be lowered if the value is known.
394 if (!CurVal) {
395 DeferredLoads.push_back(cast<LoadInst>(Inst));
396 return nullptr;
397 }
398
399 Value *Index = calculateVectorIndex(
400 cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx);
401
402 // We're loading the full vector.
403 Type *AccessTy = Inst->getType();
404 TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
405 if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
406 if (AccessTy->isPtrOrPtrVectorTy())
407 CurVal = CreateTempPtrIntCast(CurVal, AccessTy);
408 else if (CurVal->getType()->isPtrOrPtrVectorTy())
409 CurVal = CreateTempPtrIntCast(CurVal, CurVal->getType());
410 Value *NewVal = Builder.CreateBitOrPointerCast(CurVal, AccessTy);
411 Inst->replaceAllUsesWith(NewVal);
412 return nullptr;
413 }
414
415 // Loading a subvector.
416 if (isa<FixedVectorType>(AccessTy)) {
417 assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
418 const unsigned NumLoadedElts = AccessSize / DL.getTypeStoreSize(VecEltTy);
419 auto *SubVecTy = FixedVectorType::get(VecEltTy, NumLoadedElts);
420 assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
421
422 unsigned IndexVal = cast<ConstantInt>(Index)->getZExtValue();
423 Value *SubVec = PoisonValue::get(SubVecTy);
424 for (unsigned K = 0; K < NumLoadedElts; ++K) {
425 SubVec = Builder.CreateInsertElement(
426 SubVec, Builder.CreateExtractElement(CurVal, IndexVal + K), K);
427 }
428
429 if (AccessTy->isPtrOrPtrVectorTy())
430 SubVec = CreateTempPtrIntCast(SubVec, AccessTy);
431 else if (SubVecTy->isPtrOrPtrVectorTy())
432 SubVec = CreateTempPtrIntCast(SubVec, SubVecTy);
433
434 SubVec = Builder.CreateBitOrPointerCast(SubVec, AccessTy);
435 Inst->replaceAllUsesWith(SubVec);
436 return nullptr;
437 }
438
439 // We're loading one element.
440 Value *ExtractElement = Builder.CreateExtractElement(CurVal, Index);
441 if (AccessTy != VecEltTy)
442 ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, AccessTy);
443
444 Inst->replaceAllUsesWith(ExtractElement);
445 return nullptr;
446 }
447 case Instruction::Store: {
448 // For stores, it's a bit trickier and it depends on whether we're storing
449 // the full vector or not. If we're storing the full vector, we don't need
450 // to know the current value. If this is a store of a single element, we
451 // need to know the value.
452 StoreInst *SI = cast<StoreInst>(Inst);
453 Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx);
454 Value *Val = SI->getValueOperand();
455
456 // We're storing the full vector, we can handle this without knowing CurVal.
457 Type *AccessTy = Val->getType();
458 TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
459 if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
460 if (AccessTy->isPtrOrPtrVectorTy())
461 Val = CreateTempPtrIntCast(Val, AccessTy);
462 else if (VectorTy->isPtrOrPtrVectorTy())
463 Val = CreateTempPtrIntCast(Val, VectorTy);
464 return Builder.CreateBitOrPointerCast(Val, VectorTy);
465 }
466
467 // Storing a subvector.
468 if (isa<FixedVectorType>(AccessTy)) {
469 assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
470 const unsigned NumWrittenElts =
471 AccessSize / DL.getTypeStoreSize(VecEltTy);
472 auto *SubVecTy = FixedVectorType::get(VecEltTy, NumWrittenElts);
473 assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));
474
475 if (SubVecTy->isPtrOrPtrVectorTy())
476 Val = CreateTempPtrIntCast(Val, SubVecTy);
477 else if (AccessTy->isPtrOrPtrVectorTy())
478 Val = CreateTempPtrIntCast(Val, AccessTy);
479
480 Val = Builder.CreateBitOrPointerCast(Val, SubVecTy);
481
482 unsigned IndexVal = cast<ConstantInt>(Index)->getZExtValue();
483 Value *CurVec = GetOrLoadCurrentVectorValue();
484 for (unsigned K = 0; K < NumWrittenElts && ((IndexVal + K) < NumVecElts);
485 ++K) {
486 CurVec = Builder.CreateInsertElement(
487 CurVec, Builder.CreateExtractElement(Val, K), IndexVal + K);
488 }
489 return CurVec;
490 }
491
492 if (Val->getType() != VecEltTy)
493 Val = Builder.CreateBitOrPointerCast(Val, VecEltTy);
494 return Builder.CreateInsertElement(GetOrLoadCurrentVectorValue(), Val,
495 Index);
496 }
497 case Instruction::Call: {
498 if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {
499 // For memcpy, we need to know curval.
500 ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
501 unsigned NumCopied = Length->getZExtValue() / ElementSize;
502 MemTransferInfo *TI = &TransferInfo[MTI];
503 unsigned SrcBegin = TI->SrcIndex->getZExtValue();
504 unsigned DestBegin = TI->DestIndex->getZExtValue();
505
506 SmallVector<int> Mask;
507 for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
508 if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
509 Mask.push_back(SrcBegin++);
510 } else {
511 Mask.push_back(Idx);
512 }
513 }
514
515 return Builder.CreateShuffleVector(GetOrLoadCurrentVectorValue(), Mask);
516 }
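// Worked example (illustrative): for an <8 x i32> vector with DestBegin = 2,
// SrcBegin = 4 and NumCopied = 2, the mask built above is <0, 1, 4, 5, 4, 5, 6, 7>:
// lanes 2..3 take their values from lanes 4..5 while every other lane keeps its
// original value.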
517
518 if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
519 // For memset, we don't need to know the previous value because we
520 // currently only allow memsets that cover the whole alloca.
521 Value *Elt = MSI->getOperand(1);
522 if (DL.getTypeStoreSize(VecEltTy) > 1) {
523 Value *EltBytes =
524 Builder.CreateVectorSplat(DL.getTypeStoreSize(VecEltTy), Elt);
525 Elt = Builder.CreateBitCast(EltBytes, VecEltTy);
526 }
527
528 return Builder.CreateVectorSplat(VectorTy->getElementCount(), Elt);
529 }
530
531 llvm_unreachable("Unsupported call when promoting alloca to vector");
532 }
533
534 default:
535 llvm_unreachable("Inconsistency in instructions promotable to vector");
536 }
537
538 llvm_unreachable("Did not return after promoting instruction!");
539}
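// Illustrative before/after (assumed VectorTy = <4 x i32> and GEPVectorIdx mapping
// the pointer to the constant index 2):
//   store i32 %v, ptr addrspace(5) %gep
// is rewritten to
//   %vec.new = insertelement <4 x i32> %vec.cur, i32 %v, i32 2
// and %vec.new is returned so SSAUpdater can feed it to later users; a load of the
// same element instead becomes an extractelement of the known vector value.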
540
541static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy,
542 const DataLayout &DL) {
543 // Access as a vector type can work if the size of the access vector is a
544 // multiple of the size of the alloca's vector element type.
545 //
546 // Examples:
547 // - VecTy = <8 x float>, AccessTy = <4 x float> -> OK
548 // - VecTy = <4 x double>, AccessTy = <2 x float> -> OK
549 // - VecTy = <4 x double>, AccessTy = <3 x float> -> NOT OK
550 // - 3*32 is not a multiple of 64
551 //
552 // We could handle more complicated cases, but it'd make things a lot more
553 // complicated.
554 if (isa<FixedVectorType>(AccessTy)) {
555 TypeSize AccTS = DL.getTypeStoreSize(AccessTy);
556 TypeSize VecTS = DL.getTypeStoreSize(VecTy->getElementType());
557 return AccTS.isKnownMultipleOf(VecTS);
558 }
559
560 return CastInst::isBitOrNoopPointerCastable(AccessTy, VecTy->getElementType(),
561 DL);
562}
563
564/// Iterates over an instruction worklist that may contain multiple instructions
565/// from the same basic block, but in a different order.
566template <typename InstContainer>
567static void forEachWorkListItem(const InstContainer &WorkList,
568 std::function<void(Instruction *)> Fn) {
569 // Bucket up uses of the alloca by the block they occur in.
570 // This is important because we have to handle multiple defs/uses in a block
571 // ourselves: SSAUpdater is purely for cross-block references.
572 SmallDenseMap<BasicBlock *, SmallDenseSet<Instruction *>> UsesByBlock;
573 for (Instruction *User : WorkList)
574 UsesByBlock[User->getParent()].insert(User);
575
576 for (Instruction *User : WorkList) {
577 BasicBlock *BB = User->getParent();
578 auto &BlockUses = UsesByBlock[BB];
579
580 // Already processed, skip.
581 if (BlockUses.empty())
582 continue;
583
584 // Only user in the block, directly process it.
585 if (BlockUses.size() == 1) {
586 Fn(User);
587 continue;
588 }
589
590 // Multiple users in the block, do a linear scan to see users in order.
591 for (Instruction &Inst : *BB) {
592 if (!BlockUses.contains(&Inst))
593 continue;
594
595 Fn(&Inst);
596 }
597
598 // Clear the block so we know it's been processed.
599 BlockUses.clear();
600 }
601}
602
603// FIXME: Should try to pick the most likely to be profitable allocas first.
604bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
605 LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
606
607 if (DisablePromoteAllocaToVector) {
608 LLVM_DEBUG(dbgs() << " Promote alloca to vector is disabled\n");
609 return false;
610 }
611
612 Type *AllocaTy = Alloca.getAllocatedType();
613 auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
614 if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
615 if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
616 ArrayTy->getNumElements() > 0)
617 VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
618 ArrayTy->getNumElements());
619 }
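// For instance, an "alloca [8 x float], addrspace(5)" is treated as a candidate
// <8 x float> vector from here on (illustrative; any valid vector element type is
// handled the same way).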
620
621 // Use up to 1/4 of available register budget for vectorization.
622 unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
623 : (MaxVGPRs * 32);
624
625 if (DL->getTypeSizeInBits(AllocaTy) * 4 > Limit) {
626 LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with " << MaxVGPRs
627 << " registers available\n");
628 return false;
629 }
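// Worked example: with MaxVGPRs = 256 the default Limit is 256 * 32 = 8192 bits, so
// an alloca larger than 8192 / 4 = 2048 bits (256 bytes, i.e. 64 VGPRs worth of
// data) is rejected -- one quarter of the register budget, as stated above.
// (Numbers are illustrative; MaxVGPRs depends on the subtarget and occupancy.)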
630
631 // FIXME: There is no reason why we can't support larger arrays, we
632 // are just being conservative for now.
633 // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
634 // equivalent. Potentially these could also be promoted, but we don't
635 // currently handle this case.
636 if (!VectorTy) {
637 LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
638 return false;
639 }
640
641 if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
642 LLVM_DEBUG(dbgs() << " " << *VectorTy
643 << " has an unsupported number of elements\n");
644 return false;
645 }
646
647 std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
648 SmallVector<Instruction *> WorkList;
649 SmallVector<Instruction *> UsersToRemove;
650 SmallVector<Instruction *> DeferredInsts;
651 DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;
652 SmallVector<Use *, 8> Uses;
653
654 const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
655 LLVM_DEBUG(dbgs() << " Cannot promote alloca to vector: " << Msg << "\n"
656 << " " << *Inst << "\n");
657 return false;
658 };
659
660 for (Use &U : Alloca.uses())
661 Uses.push_back(&U);
662
663 LLVM_DEBUG(dbgs() << " Attempting promotion to: " << *VectorTy << "\n");
664
665 Type *VecEltTy = VectorTy->getElementType();
666 unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
667 while (!Uses.empty()) {
668 Use *U = Uses.pop_back_val();
669 Instruction *Inst = cast<Instruction>(U->getUser());
670
671 if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
672 // This is a store of the pointer, not to the pointer.
673 if (isa<StoreInst>(Inst) &&
674 U->getOperandNo() != StoreInst::getPointerOperandIndex())
675 return RejectUser(Inst, "pointer is being stored");
676
677 Type *AccessTy = getLoadStoreType(Inst);
678 if (AccessTy->isAggregateType())
679 return RejectUser(Inst, "unsupported load/store as aggregate");
680 assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());
681
682 Ptr = Ptr->stripPointerCasts();
683
684 // Alloca already accessed as vector.
685 if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
686 DL->getTypeStoreSize(AccessTy)) {
687 WorkList.push_back(Inst);
688 continue;
689 }
690
691 // Check that this is a simple access of a vector element.
692 bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
693 : cast<StoreInst>(Inst)->isSimple();
694 if (!IsSimple)
695 return RejectUser(Inst, "not a simple load or store");
696 if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
697 return RejectUser(Inst, "not a supported access type");
698
699 WorkList.push_back(Inst);
700 continue;
701 }
702
703 if (isa<BitCastInst>(Inst)) {
704 // Look through bitcasts.
705 for (Use &U : Inst->uses())
706 Uses.push_back(&U);
707 UsersToRemove.push_back(Inst);
708 continue;
709 }
710
711 if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
712 // If we can't compute a vector index from this GEP, then we can't
713 // promote this alloca to vector.
714 Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
715 if (!Index)
716 return RejectUser(Inst, "cannot compute vector index for GEP");
717
718 GEPVectorIdx[GEP] = Index;
719 for (Use &U : Inst->uses())
720 Uses.push_back(&U);
721 UsersToRemove.push_back(Inst);
722 continue;
723 }
724
725 if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
726 MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
727 WorkList.push_back(Inst);
728 continue;
729 }
730
731 if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
732 if (TransferInst->isVolatile())
733 return RejectUser(Inst, "mem transfer inst is volatile");
734
735 ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
736 if (!Len || (Len->getZExtValue() % ElementSize))
737 return RejectUser(Inst, "mem transfer inst length is non-constant or "
738 "not a multiple of the vector element size");
739
740 if (!TransferInfo.count(TransferInst)) {
741 DeferredInsts.push_back(Inst);
742 WorkList.push_back(Inst);
743 TransferInfo[TransferInst] = MemTransferInfo();
744 }
745
746 auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
747 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
748 if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
749 return nullptr;
750
751 return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
752 };
753
754 unsigned OpNum = U->getOperandNo();
755 MemTransferInfo *TI = &TransferInfo[TransferInst];
756 if (OpNum == 0) {
757 Value *Dest = TransferInst->getDest();
758 ConstantInt *Index = getPointerIndexOfAlloca(Dest);
759 if (!Index)
760 return RejectUser(Inst, "could not calculate constant dest index");
761 TI->DestIndex = Index;
762 } else {
763 assert(OpNum == 1);
764 Value *Src = TransferInst->getSource();
765 ConstantInt *Index = getPointerIndexOfAlloca(Src);
766 if (!Index)
767 return RejectUser(Inst, "could not calculate constant src index");
768 TI->SrcIndex = Index;
769 }
770 continue;
771 }
772
773 // Ignore assume-like intrinsics and comparisons used in assumes.
774 if (isAssumeLikeIntrinsic(Inst)) {
775 UsersToRemove.push_back(Inst);
776 continue;
777 }
778
779 if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
780 return isAssumeLikeIntrinsic(cast<Instruction>(U));
781 })) {
782 UsersToRemove.push_back(Inst);
783 continue;
784 }
785
786 return RejectUser(Inst, "unhandled alloca user");
787 }
788
789 while (!DeferredInsts.empty()) {
790 Instruction *Inst = DeferredInsts.pop_back_val();
791 MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
792 // TODO: Support the case if the pointers are from different alloca or
793 // from different address spaces.
794 MemTransferInfo &Info = TransferInfo[TransferInst];
795 if (!Info.SrcIndex || !Info.DestIndex)
796 return RejectUser(
797 Inst, "mem transfer inst is missing constant src and/or dst index");
798 }
799
800 LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
801 << *VectorTy << '\n');
802 const unsigned VecStoreSize = DL->getTypeStoreSize(VectorTy);
803
804 // Alloca is uninitialized memory. Imitate that by making the first value
805 // undef.
806 SSAUpdater Updater;
807 Updater.Initialize(VectorTy, "promotealloca");
808 Updater.AddAvailableValue(Alloca.getParent(), UndefValue::get(VectorTy));
809
810 // First handle the initial worklist.
811 SmallVector<LoadInst *, 4> DeferredLoads;
812 forEachWorkListItem(WorkList, [&](Instruction *I) {
813 BasicBlock *BB = I->getParent();
814 // On the first pass, we only take values that are trivially known, i.e.
815 // where AddAvailableValue was already called in this block.
816 Value *Result = promoteAllocaUserToVector(
817 I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
818 Updater.FindValueForBlock(BB), DeferredLoads);
819 if (Result)
820 Updater.AddAvailableValue(BB, Result);
821 });
822
823 // Then handle deferred loads.
824 forEachWorkListItem(DeferredLoads, [&](Instruction *I) {
825 SmallVector<LoadInst *, 0> NewDLs;
826 BasicBlock *BB = I->getParent();
827 // On the second pass, we use GetValueInMiddleOfBlock to guarantee we always
828 // get a value, inserting PHIs as needed.
829 Value *Result = promoteAllocaUserToVector(
830 I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
831 Updater.GetValueInMiddleOfBlock(I->getParent()), NewDLs);
832 if (Result)
833 Updater.AddAvailableValue(BB, Result);
834 assert(NewDLs.empty() && "No more deferred loads should be queued!");
835 });
836
837 // Delete all instructions. On the first pass, new dummy loads may have been
838 // added so we need to collect them too.
839 DenseSet<Instruction *> InstsToDelete(WorkList.begin(), WorkList.end());
840 InstsToDelete.insert(DeferredLoads.begin(), DeferredLoads.end());
841 for (Instruction *I : InstsToDelete) {
842 assert(I->use_empty());
843 I->eraseFromParent();
844 }
845
846 // Delete all the users that are known to be removeable.
847 for (Instruction *I : reverse(UsersToRemove)) {
848 I->dropDroppableUses();
849 assert(I->use_empty());
850 I->eraseFromParent();
851 }
852
853 // Alloca should now be dead too.
854 assert(Alloca.use_empty());
855 Alloca.eraseFromParent();
856 return true;
857}
858
859std::pair<Value *, Value *>
860AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
861 Function &F = *Builder.GetInsertBlock()->getParent();
862 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
863
864 if (!IsAMDHSA) {
865 Function *LocalSizeYFn =
866 Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
867 Function *LocalSizeZFn =
868 Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
869
870 CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
871 CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
872
873 ST.makeLIDRangeMetadata(LocalSizeY);
874 ST.makeLIDRangeMetadata(LocalSizeZ);
875
876 return std::pair(LocalSizeY, LocalSizeZ);
877 }
878
879 // We must read the size out of the dispatch pointer.
880 assert(IsAMDGCN);
881
882 // We are indexing into this struct, and want to extract the workgroup_size_*
883 // fields.
884 //
885 // typedef struct hsa_kernel_dispatch_packet_s {
886 // uint16_t header;
887 // uint16_t setup;
888 // uint16_t workgroup_size_x ;
889 // uint16_t workgroup_size_y;
890 // uint16_t workgroup_size_z;
891 // uint16_t reserved0;
892 // uint32_t grid_size_x ;
893 // uint32_t grid_size_y ;
894 // uint32_t grid_size_z;
895 //
896 // uint32_t private_segment_size;
897 // uint32_t group_segment_size;
898 // uint64_t kernel_object;
899 //
900 // #ifdef HSA_LARGE_MODEL
901 // void *kernarg_address;
902 // #elif defined HSA_LITTLE_ENDIAN
903 // void *kernarg_address;
904 // uint32_t reserved1;
905 // #else
906 // uint32_t reserved1;
907 // void *kernarg_address;
908 // #endif
909 // uint64_t reserved2;
910 // hsa_signal_t completion_signal; // uint64_t wrapper
911 // } hsa_kernel_dispatch_packet_t
912 //
913 Function *DispatchPtrFn =
914 Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
915
916 CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
917 DispatchPtr->addRetAttr(Attribute::NoAlias);
918 DispatchPtr->addRetAttr(Attribute::NonNull);
919 F.removeFnAttr("amdgpu-no-dispatch-ptr");
920
921 // Size of the dispatch packet struct.
922 DispatchPtr->addDereferenceableRetAttr(64);
923
924 Type *I32Ty = Type::getInt32Ty(Mod->getContext());
925 Value *CastDispatchPtr = Builder.CreateBitCast(
926 DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));
927
928 // We could do a single 64-bit load here, but it's likely that the basic
929 // 32-bit and extract sequence is already present, and it is probably easier
930 // to CSE this. The loads should be mergeable later anyway.
931 Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
932 LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
933
934 Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
935 LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
936
937 MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
938 LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
939 LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
940 ST.makeLIDRangeMetadata(LoadZU);
941
942 // Extract y component. Upper half of LoadZU should be zero already.
943 Value *Y = Builder.CreateLShr(LoadXY, 16);
944
945 return std::pair(Y, LoadZU);
946}
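// Illustrative layout of the two dwords loaded above (little-endian dispatch packet):
//   dword at byte offset 4 = (workgroup_size_y << 16) | workgroup_size_x
//   dword at byte offset 8 = (reserved0        << 16) | workgroup_size_z
// which is why Y is recovered with a 16-bit logical shift right and Z can be used
// directly, the upper half being expected to be zero.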
947
948Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
949 unsigned N) {
950 Function *F = Builder.GetInsertBlock()->getParent();
951 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
952 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
953 StringRef AttrName;
954
955 switch (N) {
956 case 0:
957 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
958 : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
959 AttrName = "amdgpu-no-workitem-id-x";
960 break;
961 case 1:
962 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
963 : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
964 AttrName = "amdgpu-no-workitem-id-y";
965 break;
966
967 case 2:
968 IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
969 : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
970 AttrName = "amdgpu-no-workitem-id-z";
971 break;
972 default:
973 llvm_unreachable("invalid dimension");
974 }
975
976 Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
977 CallInst *CI = Builder.CreateCall(WorkitemIdFn);
978 ST.makeLIDRangeMetadata(CI);
979 F->removeFnAttr(AttrName);
980
981 return CI;
982}
983
984static bool isCallPromotable(CallInst *CI) {
985 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
986 if (!II)
987 return false;
988
989 switch (II->getIntrinsicID()) {
990 case Intrinsic::memcpy:
991 case Intrinsic::memmove:
992 case Intrinsic::memset:
993 case Intrinsic::lifetime_start:
994 case Intrinsic::lifetime_end:
995 case Intrinsic::invariant_start:
996 case Intrinsic::invariant_end:
997 case Intrinsic::launder_invariant_group:
998 case Intrinsic::strip_invariant_group:
999 case Intrinsic::objectsize:
1000 return true;
1001 default:
1002 return false;
1003 }
1004}
1005
1006bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
1007 Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
1008 int OpIdx1) const {
1009 // Figure out which operand is the one we might not be promoting.
1010 Value *OtherOp = Inst->getOperand(OpIdx0);
1011 if (Val == OtherOp)
1012 OtherOp = Inst->getOperand(OpIdx1);
1013
1014 if (isa<ConstantPointerNull>(OtherOp))
1015 return true;
1016
1017 Value *OtherObj = getUnderlyingObject(OtherOp);
1018 if (!isa<AllocaInst>(OtherObj))
1019 return false;
1020
1021 // TODO: We should be able to replace undefs with the right pointer type.
1022
1023 // TODO: If we know the other base object is another promotable
1024 // alloca, not necessarily this alloca, we can do this. The
1025 // important part is both must have the same address space at
1026 // the end.
1027 if (OtherObj != BaseAlloca) {
1028 LLVM_DEBUG(
1029 dbgs() << "Found a binary instruction with another alloca object\n");
1030 return false;
1031 }
1032
1033 return true;
1034}
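// Illustrative example: "%c = icmp eq ptr addrspace(5) %p0, %p1" is acceptable when
// %p0 and %p1 are both derived from the promoted alloca (or one operand is a null
// constant), but if %p1 points into a different object the compare is rejected,
// since only one side would be rewritten to the LDS address space.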
1035
1036bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
1037 Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
1038
1039 for (User *User : Val->users()) {
1040 if (is_contained(WorkList, User))
1041 continue;
1042
1043 if (CallInst *CI = dyn_cast<CallInst>(User)) {
1044 if (!isCallPromotable(CI))
1045 return false;
1046
1047 WorkList.push_back(User);
1048 continue;
1049 }
1050
1051 Instruction *UseInst = cast<Instruction>(User);
1052 if (UseInst->getOpcode() == Instruction::PtrToInt)
1053 return false;
1054
1055 if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
1056 if (LI->isVolatile())
1057 return false;
1058
1059 continue;
1060 }
1061
1062 if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
1063 if (SI->isVolatile())
1064 return false;
1065
1066 // Reject if the stored value is not the pointer operand.
1067 if (SI->getPointerOperand() != Val)
1068 return false;
1069 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
1070 if (RMW->isVolatile())
1071 return false;
1072 } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
1073 if (CAS->isVolatile())
1074 return false;
1075 }
1076
1077 // Only promote a select if we know that the other select operand
1078 // is from another pointer that will also be promoted.
1079 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
1080 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
1081 return false;
1082
1083 // May need to rewrite constant operands.
1084 WorkList.push_back(ICmp);
1085 }
1086
1087 if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
1088 // Give up if the pointer may be captured.
1089 if (PointerMayBeCaptured(UseInst, true, true))
1090 return false;
1091 // Don't collect the users of this.
1092 WorkList.push_back(User);
1093 continue;
1094 }
1095
1096 // Do not promote vector/aggregate type instructions. It is hard to track
1097 // their users.
1098 if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
1099 return false;
1100
1101 if (!User->getType()->isPointerTy())
1102 continue;
1103
1104 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
1105 // Be conservative if an address could be computed outside the bounds of
1106 // the alloca.
1107 if (!GEP->isInBounds())
1108 return false;
1109 }
1110
1111 // Only promote a select if we know that the other select operand is from
1112 // another pointer that will also be promoted.
1113 if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
1114 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
1115 return false;
1116 }
1117
1118 // Repeat for phis.
1119 if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
1120 // TODO: Handle more complex cases. We should be able to replace loops
1121 // over arrays.
1122 switch (Phi->getNumIncomingValues()) {
1123 case 1:
1124 break;
1125 case 2:
1126 if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
1127 return false;
1128 break;
1129 default:
1130 return false;
1131 }
1132 }
1133
1134 WorkList.push_back(User);
1135 if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
1136 return false;
1137 }
1138
1139 return true;
1140}
1141
1142bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
1143
1144 FunctionType *FTy = F.getFunctionType();
1145 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
1146
1147 // If the function has any arguments in the local address space, then it's
1148 // possible these arguments require the entire local memory space, so
1149 // we cannot use local memory in the pass.
1150 for (Type *ParamTy : FTy->params()) {
1151 PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
1152 if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
1153 LocalMemLimit = 0;
1154 LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
1155 "local memory disabled.\n");
1156 return false;
1157 }
1158 }
1159
1160 LocalMemLimit = ST.getAddressableLocalMemorySize();
1161 if (LocalMemLimit == 0)
1162 return false;
1163
1164 SmallVector<const Constant *, 16> Stack;
1165 SmallPtrSet<const Constant *, 8> VisitedConstants;
1166 SmallPtrSet<const GlobalVariable *, 8> UsedLDS;
1167
1168 auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
1169 for (const User *U : Val->users()) {
1170 if (const Instruction *Use = dyn_cast<Instruction>(U)) {
1171 if (Use->getParent()->getParent() == &F)
1172 return true;
1173 } else {
1174 const Constant *C = cast<Constant>(U);
1175 if (VisitedConstants.insert(C).second)
1176 Stack.push_back(C);
1177 }
1178 }
1179
1180 return false;
1181 };
1182
1183 for (GlobalVariable &GV : Mod->globals()) {
1184 if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
1185 continue;
1186
1187 if (visitUsers(&GV, &GV)) {
1188 UsedLDS.insert(&GV);
1189 Stack.clear();
1190 continue;
1191 }
1192
1193 // For any ConstantExpr uses, we need to recursively search the users until
1194 // we see a function.
1195 while (!Stack.empty()) {
1196 const Constant *C = Stack.pop_back_val();
1197 if (visitUsers(&GV, C)) {
1198 UsedLDS.insert(&GV);
1199 Stack.clear();
1200 break;
1201 }
1202 }
1203 }
1204
1205 const DataLayout &DL = Mod->getDataLayout();
1206 SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
1207 AllocatedSizes.reserve(UsedLDS.size());
1208
1209 for (const GlobalVariable *GV : UsedLDS) {
1210 Align Alignment =
1211 DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
1212 uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
1213
1214 // HIP uses an extern unsized array in local address space for dynamically
1215 // allocated shared memory. In that case, we have to disable the promotion.
1216 if (GV->hasExternalLinkage() && AllocSize == 0) {
1217 LocalMemLimit = 0;
1218 LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
1219 "local memory. Promoting to local memory "
1220 "disabled.\n");
1221 return false;
1222 }
1223
1224 AllocatedSizes.emplace_back(AllocSize, Alignment);
1225 }
1226
1227 // Sort to try to estimate the worst case alignment padding
1228 //
1229 // FIXME: We should really do something to fix the addresses to a more optimal
1230 // value instead
1231 llvm::sort(AllocatedSizes, llvm::less_second());
1232
1233 // Check how much local memory is being used by global objects
1234 CurrentLocalMemUsage = 0;
1235
1236 // FIXME: Try to account for padding here. The real padding and address is
1237 // currently determined from the inverse order of uses in the function when
1238 // legalizing, which could also potentially change. We try to estimate the
1239 // worst case here, but we probably should fix the addresses earlier.
1240 for (auto Alloc : AllocatedSizes) {
1241 CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
1242 CurrentLocalMemUsage += Alloc.first;
1243 }
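// Worked example (illustrative): for two LDS globals of (size 4, align 4) and
// (size 100, align 16), the sort by alignment visits the 4-byte-aligned one first:
// usage = alignTo(0, 4) + 4 = 4, then alignTo(4, 16) + 100 = 116 bytes estimated.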
1244
1245 unsigned MaxOccupancy =
1246 ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);
1247
1248 // Restrict local memory usage so that we don't drastically reduce occupancy,
1249 // unless it is already significantly reduced.
1250
1251 // TODO: Have some sort of hint or other heuristics to guess occupancy based
1252 // on other factors.
1253 unsigned OccupancyHint = ST.getWavesPerEU(F).second;
1254 if (OccupancyHint == 0)
1255 OccupancyHint = 7;
1256
1257 // Clamp to max value.
1258 OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());
1259
1260 // Check the hint but ignore it if it's obviously wrong from the existing LDS
1261 // usage.
1262 MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
1263
1264 // Round up to the next tier of usage.
1265 unsigned MaxSizeWithWaveCount =
1266 ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);
1267
1268 // Program is possibly broken by using more local mem than available.
1269 if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
1270 return false;
1271
1272 LocalMemLimit = MaxSizeWithWaveCount;
1273
1274 LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
1275 << " bytes of LDS\n"
1276 << " Rounding size to " << MaxSizeWithWaveCount
1277 << " with a maximum occupancy of " << MaxOccupancy << '\n'
1278 << " and " << (LocalMemLimit - CurrentLocalMemUsage)
1279 << " available for promotion\n");
1280
1281 return true;
1282}
1283
1284// FIXME: Should try to pick the most likely to be profitable allocas first.
1285bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
1286 bool SufficientLDS) {
1287 LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');
1288
1289 if (DisablePromoteAllocaToLDS) {
1290 LLVM_DEBUG(dbgs() << " Promote alloca to LDS is disabled\n");
1291 return false;
1292 }
1293
1294 const DataLayout &DL = Mod->getDataLayout();
1295 IRBuilder<> Builder(&I);
1296
1297 const Function &ContainingFunction = *I.getParent()->getParent();
1298 CallingConv::ID CC = ContainingFunction.getCallingConv();
1299
1300 // Don't promote the alloca to LDS for shader calling conventions as the work
1301 // item ID intrinsics are not supported for these calling conventions.
1302 // Furthermore not all LDS is available for some of the stages.
1303 switch (CC) {
1304 case CallingConv::AMDGPU_KERNEL:
1305 case CallingConv::SPIR_KERNEL:
1306 break;
1307 default:
1308 LLVM_DEBUG(
1309 dbgs()
1310 << " promote alloca to LDS not supported with calling convention.\n");
1311 return false;
1312 }
1313
1314 // Not likely to have sufficient local memory for promotion.
1315 if (!SufficientLDS)
1316 return false;
1317
1318 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
1319 unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;
1320
1321 Align Alignment =
1322 DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());
1323
1324 // FIXME: This computed padding is likely wrong since it depends on inverse
1325 // usage order.
1326 //
1327 // FIXME: It is also possible that if we're allowed to use all of the memory,
1328 // we could end up using more than the maximum due to alignment padding.
1329
1330 uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
1331 uint32_t AllocSize =
1332 WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
1333 NewSize += AllocSize;
1334
1335 if (NewSize > LocalMemLimit) {
1336 LLVM_DEBUG(dbgs() << " " << AllocSize
1337 << " bytes of local memory not available to promote\n");
1338 return false;
1339 }
1340
1341 CurrentLocalMemUsage = NewSize;
1342
1343 std::vector<Value *> WorkList;
1344
1345 if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
1346 LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
1347 return false;
1348 }
1349
1350 LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");
1351
1352 Function *F = I.getParent()->getParent();
1353
1354 Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
1355 GlobalVariable *GV = new GlobalVariable(
1356 *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
1357 Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
1358 GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
1359 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1360 GV->setAlignment(I.getAlign());
1361
1362 Value *TCntY, *TCntZ;
1363
1364 std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
1365 Value *TIdX = getWorkitemID(Builder, 0);
1366 Value *TIdY = getWorkitemID(Builder, 1);
1367 Value *TIdZ = getWorkitemID(Builder, 2);
1368
1369 Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
1370 Tmp0 = Builder.CreateMul(Tmp0, TIdX);
1371 Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
1372 Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
1373 TID = Builder.CreateAdd(TID, TIdZ);
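// The value built above is the linearized workitem id
//   TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ + TIdZ
// e.g. for an 8x4x2 workgroup, workitem (1, 2, 1) gets TID = 1*8 + 2*2 + 1 = 13
// (illustrative numbers), which selects that workitem's private slice of the array.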
1374
1375 LLVMContext &Context = Mod->getContext();
1376 Value *Indices[] = {Constant::getNullValue(Type::getInt32Ty(Context)), TID};
1377
1378 Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
1379 I.mutateType(Offset->getType());
1380 I.replaceAllUsesWith(Offset);
1381 I.eraseFromParent();
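// Illustrative end result (assumed kernel @foo with "%buf = alloca [2 x float],
// addrspace(5)" and WorkGroupSize = 256): the alloca is replaced by
//   @foo.buf = internal unnamed_addr addrspace(3) global [256 x [2 x float]] poison
//   %ptr = getelementptr inbounds [256 x [2 x float]], ptr addrspace(3) @foo.buf, i32 0, i32 %tid
// so each workitem addresses its own slice of the shared LDS array.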
1382
1383 SmallVector<IntrinsicInst *> DeferredIntrs;
1384
1385 for (Value *V : WorkList) {
1386 CallInst *Call = dyn_cast<CallInst>(V);
1387 if (!Call) {
1388 if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
1389 PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
1390
1391 if (isa<ConstantPointerNull>(CI->getOperand(0)))
1392 CI->setOperand(0, ConstantPointerNull::get(NewTy));
1393
1394 if (isa<ConstantPointerNull>(CI->getOperand(1)))
1395 CI->setOperand(1, ConstantPointerNull::get(NewTy));
1396
1397 continue;
1398 }
1399
1400 // The operand's value should be corrected on its own and we don't want to
1401 // touch the users.
1402 if (isa<AddrSpaceCastInst>(V))
1403 continue;
1404
1405 PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);
1406
1407 // FIXME: It doesn't really make sense to try to do this for all
1408 // instructions.
1409 V->mutateType(NewTy);
1410
1411 // Adjust the types of any constant operands.
1412 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1413 if (isa<ConstantPointerNull>(SI->getOperand(1)))
1414 SI->setOperand(1, ConstantPointerNull::get(NewTy));
1415
1416 if (isa<ConstantPointerNull>(SI->getOperand(2)))
1417 SI->setOperand(2, ConstantPointerNull::get(NewTy));
1418 } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
1419 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1420 if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
1421 Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
1422 }
1423 }
1424
1425 continue;
1426 }
1427
1428 IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
1429 Builder.SetInsertPoint(Intr);
1430 switch (Intr->getIntrinsicID()) {
1431 case Intrinsic::lifetime_start:
1432 case Intrinsic::lifetime_end:
1433 // These intrinsics are for address space 0 only
1434 Intr->eraseFromParent();
1435 continue;
1436 case Intrinsic::memcpy:
1437 case Intrinsic::memmove:
1438 // These have 2 pointer operands. In case if second pointer also needs
1439 // to be replaced we defer processing of these intrinsics until all
1440 // other values are processed.
1441 DeferredIntrs.push_back(Intr);
1442 continue;
1443 case Intrinsic::memset: {
1444 MemSetInst *MemSet = cast<MemSetInst>(Intr);
1445 Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
1446 MemSet->getLength(), MemSet->getDestAlign(),
1447 MemSet->isVolatile());
1448 Intr->eraseFromParent();
1449 continue;
1450 }
1451 case Intrinsic::invariant_start:
1452 case Intrinsic::invariant_end:
1453 case Intrinsic::launder_invariant_group:
1454 case Intrinsic::strip_invariant_group:
1455 Intr->eraseFromParent();
1456 // FIXME: I think the invariant marker should still theoretically apply,
1457 // but the intrinsics need to be changed to accept pointers with any
1458 // address space.
1459 continue;
1460 case Intrinsic::objectsize: {
1461 Value *Src = Intr->getOperand(0);
1462 Function *ObjectSize = Intrinsic::getDeclaration(
1463 Mod, Intrinsic::objectsize,
1464 {Intr->getType(),
1465 PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});
1466
1467 CallInst *NewCall = Builder.CreateCall(
1468 ObjectSize,
1469 {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
1470 Intr->replaceAllUsesWith(NewCall);
1471 Intr->eraseFromParent();
1472 continue;
1473 }
1474 default:
1475 Intr->print(errs());
1476 llvm_unreachable("Don't know how to promote alloca intrinsic use.");
1477 }
1478 }
1479
1480 for (IntrinsicInst *Intr : DeferredIntrs) {
1481 Builder.SetInsertPoint(Intr);
1482 Intrinsic::ID ID = Intr->getIntrinsicID();
1483 assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);
1484
1485 MemTransferInst *MI = cast<MemTransferInst>(Intr);
1486 auto *B = Builder.CreateMemTransferInst(
1487 ID, MI->getRawDest(), MI->getDestAlign(), MI->getRawSource(),
1488 MI->getSourceAlign(), MI->getLength(), MI->isVolatile());
1489
1490 for (unsigned I = 0; I != 2; ++I) {
1491 if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
1492 B->addDereferenceableParamAttr(I, Bytes);
1493 }
1494 }
1495
1496 Intr->eraseFromParent();
1497 }
1498
1499 return true;
1500}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
unsigned Intr
AMDGPU promote alloca to vector or LDS
static Value * GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca, Type *VecElemTy, const DataLayout &DL)
static Value * calculateVectorIndex(Value *Ptr, const std::map< GetElementPtrInst *, Value * > &GEPIdx)
static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy, const DataLayout &DL)
static void forEachWorkListItem(const InstContainer &WorkList, std::function< void(Instruction *)> Fn)
Iterates over an instruction worklist that may contain multiple instructions from the same basic bloc...
static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI, const DataLayout &DL)
static bool isCallPromotable(CallInst *CI)
#define DEBUG_TYPE
static Value * promoteAllocaUserToVector(Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy, unsigned VecStoreSize, unsigned ElementSize, DenseMap< MemTransferInst *, MemTransferInfo > &TransferInfo, std::map< GetElementPtrInst *, Value * > &GEPVectorIdx, Value *CurVal, SmallVectorImpl< LoadInst * > &DeferredLoads)
Promotes a single user of the alloca to a vector form.
assume Assume Builder
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
Rewrite Partial Register Uses
AMD GCN specific subclass of TargetSubtarget.
Hexagon Common GEP
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
Module * Mod
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:59
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
Target-Independent Code Generator Pass Configuration Options pass.
static const AMDGPUSubtarget & get(const MachineFunction &MF)
Class for arbitrary precision integers.
Definition: APInt.h:76
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition: APInt.cpp:1764
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
an instruction to allocate memory on the stack
Definition: Instructions.h:58
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:118
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
Represent the analysis usage information of a pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:269
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:648
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:513
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:718
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
Represents analyses that only rely on functions' control flow.
Definition: PassManager.h:113
void addDereferenceableRetAttr(uint64_t Bytes)
adds the dereferenceable attribute to the list of attributes.
Definition: InstrTypes.h:1610
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Definition: InstrTypes.h:1531
This class represents a function call, abstracting a target machine's calling convention.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:145
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1691
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:536
unsigned getNumElements() const
Definition: DerivedTypes.h:579
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition: Type.cpp:693
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
virtual bool runOnFunction(Function &F)=0
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
Definition: Function.h:239
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
MaybeAlign getAlign() const
Returns the alignment of the given variable or function.
Definition: GlobalObject.h:79
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalObject.
Definition: Globals.cpp:128
bool hasExternalLinkage() const
Definition: GlobalValue.h:506
void setUnnamedAddr(UnnamedAddr Val)
Definition: GlobalValue.h:227
unsigned getAddressSpace() const
Definition: GlobalValue.h:201
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:55
Type * getValueType() const
Definition: GlobalValue.h:292
This instruction compares its operands according to the predicate given to the constructor.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition: IRBuilder.h:2628
InstSimplifyFolder - Use InstructionSimplify to fold operations to existing values.
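A hedged sketch of pairing IRBuilder with InstSimplifyFolder so that newly created instructions fold to existing values where InstructionSimplify can prove equivalence; the function and variable names are assumptions:
  #include "llvm/Analysis/InstSimplifyFolder.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;
  Value *emitFoldedAdd(Instruction *InsertPt, Value *A, Value *B,
                       const DataLayout &DL) {
    IRBuilder<InstSimplifyFolder> Builder(InsertPt->getContext(),
                                          InstSimplifyFolder(DL));
    Builder.SetInsertPoint(InsertPt);  // new IR is emitted before InsertPt
    return Builder.CreateAdd(A, B);    // may return an existing value instead of a new instruction
  }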
const BasicBlock * getParent() const
Definition: Instruction.h:90
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1521
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:195
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:83
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:54
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:177
Metadata node.
Definition: Metadata.h:950
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1416
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
size_type size() const
Definition: MapVector.h:60
std::pair< KeyT, ValueT > & front()
Definition: MapVector.h:83
Value * getLength() const
Value * getRawDest() const
MaybeAlign getDestAlign() const
bool isVolatile() const
Value * getValue() const
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
This class wraps the llvm.memcpy/memmove intrinsics.
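A minimal sketch of inspecting a memset through its intrinsic wrapper class; the predicate name and the "non-volatile, constant length" policy are illustrative assumptions:
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;
  bool isSimpleMemSet(Instruction *I) {
    if (auto *MSI = dyn_cast<MemSetInst>(I))
      return !MSI->isVolatile() && isa<ConstantInt>(MSI->getLength());
    return false;  // not a memset at all
  }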
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
Definition: Pass.cpp:98
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Definition: Pass.cpp:81
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1743
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
void preserveSet()
Mark an analysis set as preserved.
Definition: PassManager.h:188
Helper class for SSA formation on a set of values defined in multiple blocks.
Definition: SSAUpdater.h:39
Value * FindValueForBlock(BasicBlock *BB) const
Return the value for the specified block if the SSAUpdater has one, otherwise return nullptr.
Definition: SSAUpdater.cpp:66
void Initialize(Type *Ty, StringRef Name)
Reset this object to get ready for a new set of SSA updates with type 'Ty'.
Definition: SSAUpdater.cpp:53
Value * GetValueInMiddleOfBlock(BasicBlock *BB)
Construct SSA form, materializing a value that is live in the middle of the specified block.
Definition: SSAUpdater.cpp:98
void AddAvailableValue(BasicBlock *BB, Value *V)
Indicate that a rewritten value is available in the specified block with the specified value.
Definition: SSAUpdater.cpp:70
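The three SSAUpdater entry points above are normally used together; a minimal sketch, assuming DefBB/DefVal identify where the promoted value is defined (all names are illustrative):
  #include "llvm/IR/Instructions.h"
  #include "llvm/Transforms/Utils/SSAUpdater.h"
  using namespace llvm;
  // Replace a load with whatever value reaches its block, letting SSAUpdater
  // insert PHI nodes at control-flow merges.
  void rewriteLoad(Type *Ty, BasicBlock *DefBB, Value *DefVal, LoadInst *Load) {
    SSAUpdater Updater;
    Updater.Initialize(Ty, "promoted");        // begin a fresh set of updates
    Updater.AddAvailableValue(DefBB, DefVal);  // value available at the end of DefBB
    Value *Live = Updater.GetValueInMiddleOfBlock(Load->getParent());
    Load->replaceAllUsesWith(Live);
    Load->eraseFromParent();
  }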
This class represents the LLVM 'select' instruction.
size_type size() const
Definition: SmallPtrSet.h:93
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition: SmallPtrSet.h:451
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
static unsigned getPointerOperandIndex()
Definition: Instructions.h:395
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:78
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:252
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:295
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1724
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:535
iterator_range< user_iterator > users()
Definition: Value.h:421
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1069
iterator_range< use_iterator > uses()
Definition: Value.h:376
static bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as an element type.
Definition: Type.cpp:684
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
Definition: DerivedTypes.h:638
Type * getElementType() const
Definition: DerivedTypes.h:433
constexpr bool isKnownMultipleOf(ScalarTy RHS) const
This function tells the caller whether the element count is known at compile time to be a multiple of the scalar value RHS.
Definition: TypeSize.h:175
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
TargetPassConfig.
@ LOCAL_ADDRESS
Address space for local memory.
Definition: AMDGPU.h:395
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
Definition: AMDGPU.h:394
const CustomOperand< const MCSubtargetInfo & > Msg[]
bool isEntryFunctionCC(CallingConv::ID CC)
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
Definition: CallingConv.h:197
@ SPIR_KERNEL
Used for SPIR kernel functions.
Definition: CallingConv.h:141
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1422
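A minimal sketch of getDeclaration for an AMDGPU intrinsic with no overloaded types; the helper name is an assumption:
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/IntrinsicsAMDGPU.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;
  Value *emitWorkitemIdX(Module *M, IRBuilder<> &Builder) {
    Function *Decl = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_workitem_id_x);
    return Builder.CreateCall(Decl);  // i32 call to llvm.amdgcn.workitem.id.x
  }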
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:862
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
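A minimal sketch of match/m_SpecificInt; the constant 7 and the helper name are arbitrary illustrative choices:
  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;
  // True if V is the scalar constant 7, or a vector with every element equal to 7.
  bool isConstantSeven(Value *V, unsigned BitWidth) {
    return match(V, m_SpecificInt(APInt(BitWidth, 7)));
  }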
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
@ Length
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1727
bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
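A minimal sketch combining the two helpers above to ask whether a memory access ultimately addresses a given alloca; the function name is an assumption:
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;
  // Looks through GEPs and casts (up to the default lookup depth).
  bool accessesAlloca(const Instruction *I, const AllocaInst *AI) {
    const Value *Ptr = getLoadStorePointerOperand(I);
    return Ptr && getUnderlyingObject(Ptr) == AI;
  }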
FunctionPass * createAMDGPUPromoteAllocaToVector()
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:429
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1652
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
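A minimal sketch of a conservative escape check built on PointerMayBeCaptured; the wrapper name and the chosen flags are assumptions:
  #include "llvm/Analysis/CaptureTracking.h"
  using namespace llvm;
  // Treat returning the pointer and storing it to memory both as captures.
  bool mayEscape(const Value *Ptr) {
    return PointerMayBeCaptured(Ptr, /*ReturnCaptures=*/true,
                                /*StoreCaptures=*/true);
  }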
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
FunctionPass * createAMDGPUPromoteAlloca()
@ Mod
The access may modify the value stored in memory.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
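A minimal sketch of alignTo used to lay out one object after another; the function name is an assumption:
  #include "llvm/Support/Alignment.h"
  #include <cstdint>
  // Returns the first free byte after placing an object of the given size and
  // alignment at or beyond CurOffset.
  uint64_t placeObject(uint64_t CurOffset, llvm::Align A, uint64_t Size) {
    uint64_t Start = llvm::alignTo(CurOffset, A); // round up to a multiple of A
    return Start + Size;
  }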
char & AMDGPUPromoteAllocaID
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1884
char & AMDGPUPromoteAllocaToVectorID
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
#define N
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Function object to check whether the second component of a container supported by std::get (like std::pair) compares less than the second component of another container.
Definition: STLExtras.h:1464