//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//
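//
// For example (illustrative IR, not the pass's verbatim output), a private
// array such as
//   %stack = alloca [4 x i32], align 4, addrspace(5)
// may be rewritten into a <4 x i32> kept in registers when every use is a
// simple element access, or replaced by a per-workitem slice of an LDS array
// like
//   @kernel.stack = internal addrspace(3) global [256 x [4 x i32]] undef
// where 256 stands for the maximum workgroup size (names and sizes here are
// hypothetical).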

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
    "disable-promote-alloca-to-vector",
    cl::desc("Disable promote alloca to vector"),
    cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
    "disable-promote-alloca-to-lds",
    cl::desc("Disable promote alloca to LDS"),
    cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promote alloca to vector"),
    cl::init(0));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
  bool run(Function &F);
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promote alloca for accurate
// estimation of LDS available
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca which would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t
  //
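  // From that layout: header and setup occupy bytes 0-3, so the i32 at dword
  // offset 1 holds workgroup_size_x in its low half and workgroup_size_y in
  // its high half, and the dword at offset 2 holds workgroup_size_z in its
  // low half (its high half, reserved0, is expected to be zero). This is why
  // the GEPs below index dwords 1 and 2 of the dispatch packet.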
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;

  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

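// For instance (an illustrative case, not exhaustive): for an alloca of
// [4 x i32], the GEP
//   %p = getelementptr inbounds [4 x i32], ptr %alloca, i32 0, i32 %i
// has byte offset 4 * %i, so the computed vector index is %i; a purely
// constant byte offset of 8 maps to the constant index 2.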
static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
                               Type *VecElemTy, const DataLayout &DL) {
  // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
  // helper.
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VarOffsets;
  APInt ConstOffset(BW, 0);
  if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
      !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
    return nullptr;

  unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
  if (VarOffsets.size() > 1)
    return nullptr;

  if (VarOffsets.size() == 1) {
    // Only handle cases where we don't need to insert extra arithmetic
    // instructions.
    const auto &VarOffset = VarOffsets.front();
    if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
      return nullptr;
    return VarOffset.first;
  }

  APInt Quot;
  uint64_t Rem;
  APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
  if (Rem != 0)
    return nullptr;

  return ConstantInt::get(GEP->getContext(), Quot);
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);
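  // For scale: each VGPR holds 32 bits per lane, so MaxVGPRs * 32 is the full
  // per-lane register budget in bits (and the user limit, given in bytes, is
  // converted with * 8). The check below multiplies the alloca size by 4 so
  // that at most a quarter of that budget is consumed; e.g. with 256 VGPRs the
  // budget is 8192 bits, allowing allocas of up to 256 bytes.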

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  SmallVector<Instruction *> WorkList;
  SmallVector<Use *, 8> Uses;
  for (Use &U : Alloca->uses())
    Uses.push_back(&U);

  Type *VecEltTy = VectorTy->getElementType();
  while (!Uses.empty()) {
    Use *U = Uses.pop_back_val();
    Instruction *Inst = dyn_cast<Instruction>(U->getUser());

    if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
      // This is a store of the pointer, not to the pointer.
      if (isa<StoreInst>(Inst) &&
          U->getOperandNo() != StoreInst::getPointerOperandIndex())
        return false;

      Type *AccessTy = getLoadStoreType(Inst);
      Ptr = Ptr->stripPointerCasts();

      // Alloca already accessed as vector, leave alone.
      if (Ptr == Alloca && DL.getTypeStoreSize(Alloca->getAllocatedType()) ==
                               DL.getTypeStoreSize(AccessTy))
        continue;

      // Check that this is a simple access of a vector element.
      bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
                                          : cast<StoreInst>(Inst)->isSimple();
      if (!IsSimple ||
          !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, DL))
        return false;

      WorkList.push_back(Inst);
      continue;
    }

    if (isa<BitCastInst>(Inst)) {
      // Look through bitcasts.
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
      // If we can't compute a vector index from this GEP, then we can't
      // promote this alloca to vector.
      Value *Index = GEPToVectorIndex(GEP, Alloca, VecEltTy, DL);
      if (!Index) {
        LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                          << '\n');
        return false;
      }

      GEPVectorIdx[GEP] = Index;
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    // Ignore assume-like intrinsics and comparisons used in assumes.
    if (isAssumeLikeIntrinsic(Inst))
      continue;

    if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
          return isAssumeLikeIntrinsic(cast<Instruction>(U));
        }))
      continue;

    // Unknown user.
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Instruction *Inst : WorkList) {
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}
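
// As an illustration (assumed IR, simplified from what the rewrite above
// emits), a scalar access of a promoted [4 x i32] alloca such as
//   %gep = getelementptr inbounds [4 x i32], ptr %alloca, i64 0, i64 %i
//   %v = load i32, ptr %gep
// becomes a whole-vector load plus an element extract:
//   %vec = load <4 x i32>, ptr %alloca
//   %v = extractelement <4 x i32> %vec, i32 %i
// and stores likewise become load/insertelement/store sequences.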

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other icmp operand is from
    // another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory. In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead
  llvm::sort(AllocatedSizes, llvm::less_second());

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
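  // For example, if 4 bytes are in use and the next variable needs 16-byte
  // alignment, alignTo() below first rounds usage up to 16, charging 12 bytes
  // of padding before the variable's own size is added.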
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

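  // Each workitem gets its own copy, so the LDS cost below is the
  // per-workitem size scaled by the maximum flat workgroup size; e.g. a
  // 4-byte i32 alloca with a 256-lane workgroup consumes 1024 bytes of the
  // LDS budget.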
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        PointerType *NewTy = PointerType::getWithSamePointeeType(
            cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::getWithSamePointeeType(
          cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have 2 pointer operands. In case the second pointer also needs
      // to be replaced, we defer processing of these intrinsics until all
      // other values are processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(
          MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
          MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::getWithSamePointeeType(
               cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B =
        Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
                                      MI->getRawSource(), MI->getSourceAlign(),
                                      MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}

bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca which would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}