LLVM 22.0.0git
AMDGPULateCodeGenPrepare.cpp
Go to the documentation of this file.
1//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This pass does misc. AMDGPU optimizations on IR *just* before instruction
11/// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPU.h"
16#include "AMDGPUTargetMachine.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/InstVisitor.h"
23#include "llvm/IR/IntrinsicsAMDGPU.h"
28
29#define DEBUG_TYPE "amdgpu-late-codegenprepare"
30
31using namespace llvm;
32
33// Scalar load widening needs running after load-store-vectorizer as that pass
34// doesn't handle overlapping cases. In addition, this pass enhances the
35// widening to handle cases where scalar sub-dword loads are naturally aligned
36// only but not dword aligned.
37static cl::opt<bool>
38 WidenLoads("amdgpu-late-codegenprepare-widen-constant-loads",
39 cl::desc("Widen sub-dword constant address space loads in "
40 "AMDGPULateCodeGenPrepare"),
42
43namespace {
44
// Late IR pass, run just before instruction selection: widens naturally
// aligned sub-dword constant-address-space scalar loads (visitLoadInst) and
// drives LiveRegOptimizer for cross-block vector type coercion.
// NOTE(review): this listing is a mangled dump; the member declarations
// around "53"/"55" (presumably UniformityInfo &UA and the DeadInsts vector)
// and part of the constructor signature ("58") are missing -- verify
// against the upstream source before editing.
45class AMDGPULateCodeGenPrepare
46 : public InstVisitor<AMDGPULateCodeGenPrepare, bool> {
47 Function &F;
48 const DataLayout &DL;
49 const GCNSubtarget &ST;
50
51 AssumptionCache *const AC;
53
55
56public:
57 AMDGPULateCodeGenPrepare(Function &F, const GCNSubtarget &ST,
59 : F(F), DL(F.getDataLayout()), ST(ST), AC(AC), UA(UA) {}
  // Entry point; returns true if any IR was changed.
60 bool run();
  // Default visitor: instructions with no dedicated handler are untouched.
61 bool visitInstruction(Instruction &) { return false; }
62
63 // Check if the specified value is at least DWORD aligned.
64 bool isDWORDAligned(const Value *V) const {
65 KnownBits Known = computeKnownBits(V, DL, AC);
    // >= 2 known trailing zero bits <=> address is a multiple of 4 bytes.
66 return Known.countMinTrailingZeros() >= 2;
67 }
68
69 bool canWidenScalarExtLoad(LoadInst &LI) const;
70 bool visitLoadInst(LoadInst &LI);
71};
72
74
// Coerces illegal vector types that live across basic-block boundaries (or
// through PHIs) into packed i32-based types, so SelectionDAG does not
// scalarize/widen them into many physical registers at block edges.
// NOTE(review): mangled dump -- lines "99", "124", "130" and "187" are
// missing (the convertFromOptType iterator parameter, the LegalizeKind
// declaration, the uniform-intrinsic early-out condition, and part of the
// same-BB filter condition respectively); verify against upstream.
75class LiveRegOptimizer {
76private:
77 Module &Mod;
78 const DataLayout &DL;
79 const GCNSubtarget &ST;
80
81 /// The scalar type to convert to
82 Type *const ConvertToScalar;
83 /// Map of Value -> Converted Value
84 ValueToValueMap ValMap;
85 /// Map of containing conversions from Optimal Type -> Original Type per BB.
86 DenseMap<BasicBlock *, ValueToValueMap> BBUseValMap;
87
88public:
89 /// Calculate the and \p return the type to convert to given a problematic \p
90 /// OriginalType. In some instances, we may widen the type (e.g. v2i8 -> i32).
91 Type *calculateConvertType(Type *OriginalType);
92 /// Convert the virtual register defined by \p V to the compatible vector of
93 /// legal type
94 Value *convertToOptType(Instruction *V, BasicBlock::iterator &InstPt);
95 /// Convert the virtual register defined by \p V back to the original type \p
96 /// ConvertType, stripping away the MSBs in cases where there was an imperfect
97 /// fit (e.g. v2i32 -> v7i8)
98 Value *convertFromOptType(Type *ConvertType, Instruction *V,
100 BasicBlock *InsertBlock);
101 /// Check for problematic PHI nodes or cross-bb values based on the value
102 /// defined by \p I, and coerce to legal types if necessary. For problematic
103 /// PHI node, we coerce all incoming values in a single invocation.
104 bool optimizeLiveType(Instruction *I,
105 SmallVectorImpl<WeakTrackingVH> &DeadInsts);
106
107 // Whether or not the type should be replaced to avoid inefficient
108 // legalization code
109 bool shouldReplace(Type *ITy) {
    // Only fixed-width vectors are candidates for coercion.
110 FixedVectorType *VTy = dyn_cast<FixedVectorType>(ITy);
111 if (!VTy)
112 return false;
113
114 const auto *TLI = ST.getTargetLowering();
115
116 Type *EltTy = VTy->getElementType();
117 // If the element size is not less than the convert to scalar size, then we
118 // can't do any bit packing
119 if (!EltTy->isIntegerTy() ||
120 EltTy->getScalarSizeInBits() > ConvertToScalar->getScalarSizeInBits())
121 return false;
122
123 // Only coerce illegal types
    // NOTE(review): the declaration of LK (TargetLoweringBase::LegalizeKind)
    // is on the missing line "124".
125 TLI->getTypeConversion(EltTy->getContext(), EVT::getEVT(EltTy, false));
126 return LK.first != TargetLoweringBase::TypeLegal;
127 }
128
  // Returns true when \p I can consume the illegal vector natively, making it
  // a profitable sink for the coercion chain.
129 bool isOpLegal(const Instruction *I) {
    // NOTE(review): the condition guarding this early return is on the
    // missing line "130" (presumably an intrinsic/uniform check).
131 return true;
132
133 // Any store is a profitable sink (prevents flip-flopping)
134 if (isa<StoreInst>(I))
135 return true;
136
137 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
138 if (auto *VT = dyn_cast<FixedVectorType>(BO->getType())) {
139 if (const auto *IT = dyn_cast<IntegerType>(VT->getElementType())) {
140 unsigned EB = IT->getBitWidth();
141 unsigned EC = VT->getNumElements();
142 // Check for SDWA-compatible operation
143 if ((EB == 8 || EB == 16) && ST.hasSDWA() && EC * EB <= 32) {
144 switch (BO->getOpcode()) {
145 case Instruction::Add:
146 case Instruction::Sub:
147 case Instruction::And:
148 case Instruction::Or:
149 case Instruction::Xor:
150 return true;
151 default:
152 break;
153 }
154 }
155 }
156 }
157 }
158
159 return false;
160 }
161
  // BFS over \p II's transitive users (through "look-through" ops) to decide
  // whether any user makes the coercion worthwhile.
162 bool isCoercionProfitable(Instruction *II) {
163 SmallPtrSet<Instruction *, 4> CVisited;
164 SmallVector<Instruction *, 4> UserList;
165
166 // Check users for profitable conditions (across block user which can
167 // natively handle the illegal vector).
168 for (User *V : II->users())
169 if (auto *UseInst = dyn_cast<Instruction>(V))
170 UserList.push_back(UseInst);
171
    // Ops that merely forward/reshape the value rather than compute on it.
172 auto IsLookThru = [](Instruction *II) {
173 if (const auto *Intr = dyn_cast<IntrinsicInst>(II))
174 return Intr->getIntrinsicID() == Intrinsic::amdgcn_perm;
175 return isa<PHINode, ShuffleVectorInst, InsertElementInst,
176 ExtractElementInst, CastInst>(II);
177 };
178
179 while (!UserList.empty()) {
180 auto CII = UserList.pop_back_val();
181 if (!CVisited.insert(CII).second)
182 continue;
183
184 // Same-BB filter must look at the *user*; and allow non-lookthrough
185 // users when the def is a PHI (loop-header pattern).
    // NOTE(review): the tail of this condition is on the missing line "187".
186 if (CII->getParent() == II->getParent() && !IsLookThru(CII) &&
188 continue;
189
190 if (isOpLegal(CII))
191 return true;
192
193 if (IsLookThru(CII))
194 for (User *V : CII->users())
195 if (auto *UseInst = dyn_cast<Instruction>(V))
196 UserList.push_back(UseInst);
197 }
198 return false;
199 }
200
  // i32 is the canonical packed scalar on AMDGPU.
201 LiveRegOptimizer(Module &Mod, const GCNSubtarget &ST)
202 : Mod(Mod), DL(Mod.getDataLayout()), ST(ST),
203 ConvertToScalar(Type::getInt32Ty(Mod.getContext())) {}
204};
205
206} // end anonymous namespace
207
// Pass driver: walks the function bottom-up, widening sub-dword scalar loads
// (unless the subtarget has native scalar sub-word loads) and coercing
// illegal cross-block vector types.
208bool AMDGPULateCodeGenPrepare::run() {
209 // "Optimize" the virtual regs that cross basic block boundaries. When
210 // building the SelectionDAG, vectors of illegal types that cross basic blocks
211 // will be scalarized and widened, with each scalar living in its
212 // own register. To work around this, this optimization converts the
213 // vectors to equivalent vectors of legal type (which are converted back
214 // before uses in subsequent blocks), to pack the bits into fewer physical
215 // registers (used in CopyToReg/CopyFromReg pairs).
216 LiveRegOptimizer LRO(*F.getParent(), ST);
217
218 bool Changed = false;
219
220 bool HasScalarSubwordLoads = ST.hasScalarSubwordLoads();
221
  // Reverse order so users are seen before defs within each block.
222 for (auto &BB : reverse(F))
223 for (Instruction &I : make_early_inc_range(reverse(BB))) {
224 Changed |= !HasScalarSubwordLoads && visit(I);
225 Changed |= LRO.optimizeLiveType(&I, DeadInsts);
226 }
227
  // NOTE(review): line "228" is missing from this dump -- presumably the
  // RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts) cleanup.
229 return Changed;
230}
231
232Type *LiveRegOptimizer::calculateConvertType(Type *OriginalType) {
233 assert(OriginalType->getScalarSizeInBits() <=
234 ConvertToScalar->getScalarSizeInBits());
235
236 FixedVectorType *VTy = cast<FixedVectorType>(OriginalType);
237
238 TypeSize OriginalSize = DL.getTypeSizeInBits(VTy);
239 TypeSize ConvertScalarSize = DL.getTypeSizeInBits(ConvertToScalar);
240 unsigned ConvertEltCount =
241 (OriginalSize + ConvertScalarSize - 1) / ConvertScalarSize;
242
243 if (OriginalSize <= ConvertScalarSize)
244 return IntegerType::get(Mod.getContext(), ConvertScalarSize);
245
246 return VectorType::get(Type::getIntNTy(Mod.getContext(), ConvertScalarSize),
247 ConvertEltCount, false);
248}
249
250Value *LiveRegOptimizer::convertToOptType(Instruction *V,
251 BasicBlock::iterator &InsertPt) {
252 FixedVectorType *VTy = cast<FixedVectorType>(V->getType());
253 Type *NewTy = calculateConvertType(V->getType());
254
255 TypeSize OriginalSize = DL.getTypeSizeInBits(VTy);
256 TypeSize NewSize = DL.getTypeSizeInBits(NewTy);
257
258 IRBuilder<> Builder(V->getParent(), InsertPt);
259 // If there is a bitsize match, we can fit the old vector into a new vector of
260 // desired type.
261 if (OriginalSize == NewSize)
262 return Builder.CreateBitCast(V, NewTy, V->getName() + ".bc");
263
264 // If there is a bitsize mismatch, we must use a wider vector.
265 assert(NewSize > OriginalSize);
266 uint64_t ExpandedVecElementCount = NewSize / VTy->getScalarSizeInBits();
267
268 SmallVector<int, 8> ShuffleMask;
269 uint64_t OriginalElementCount = VTy->getElementCount().getFixedValue();
270 for (unsigned I = 0; I < OriginalElementCount; I++)
271 ShuffleMask.push_back(I);
272
273 for (uint64_t I = OriginalElementCount; I < ExpandedVecElementCount; I++)
274 ShuffleMask.push_back(OriginalElementCount);
275
276 Value *ExpandedVec = Builder.CreateShuffleVector(V, ShuffleMask);
277 return Builder.CreateBitCast(ExpandedVec, NewTy, V->getName() + ".bc");
278}
279
// Inverse of convertToOptType: rebuild the original (illegal) vector type
// from its packed representation, inserting code at InsertPt in InsertBB and
// dropping any padding MSB lanes introduced by the widening.
280Value *LiveRegOptimizer::convertFromOptType(Type *ConvertType, Instruction *V,
281 BasicBlock::iterator &InsertPt,
282 BasicBlock *InsertBB) {
283 FixedVectorType *NewVTy = cast<FixedVectorType>(ConvertType);
284
285 TypeSize OriginalSize = DL.getTypeSizeInBits(V->getType());
286 TypeSize NewSize = DL.getTypeSizeInBits(NewVTy);
287
288 IRBuilder<> Builder(InsertBB, InsertPt);
289 // If there is a bitsize match, we simply convert back to the original type.
290 if (OriginalSize == NewSize)
291 return Builder.CreateBitCast(V, NewVTy, V->getName() + ".bc");
292
293 // If there is a bitsize mismatch, then we must have used a wider value to
294 // hold the bits.
295 assert(OriginalSize > NewSize);
296 // For wide scalars, we can just truncate the value.
  // NOTE(review): line "298" is missing from this dump -- presumably
  // "Instruction *Trunc = cast<Instruction>(" opening this expression.
297 if (!V->getType()->isVectorTy()) {
299 Builder.CreateTrunc(V, IntegerType::get(Mod.getContext(), NewSize)));
300 return cast<Instruction>(Builder.CreateBitCast(Trunc, NewVTy));
301 }
302
303 // For wider vectors, we must strip the MSBs to convert back to the original
304 // type.
305 VectorType *ExpandedVT = VectorType::get(
306 Type::getIntNTy(Mod.getContext(), NewVTy->getScalarSizeInBits()),
307 (OriginalSize / NewVTy->getScalarSizeInBits()), false);
308 Instruction *Converted =
309 cast<Instruction>(Builder.CreateBitCast(V, ExpandedVT));
310
  // Identity mask over only the first NarrowElementCount lanes drops padding.
311 unsigned NarrowElementCount = NewVTy->getElementCount().getFixedValue();
312 SmallVector<int, 8> ShuffleMask(NarrowElementCount);
313 std::iota(ShuffleMask.begin(), ShuffleMask.end(), 0);
314
315 return Builder.CreateShuffleVector(Converted, ShuffleMask);
316}
317
// Core coercion driver. Starting from \p I, collects the connected set of
// problematic defs/uses/PHIs, then (1) converts defs to the packed type,
// (2) rebuilds PHIs in the packed type, and (3) converts back to the
// original type before each use, caching per-block conversions.
318bool LiveRegOptimizer::optimizeLiveType(
319 Instruction *I, SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
320 SmallVector<Instruction *, 4> Worklist;
321 SmallPtrSet<PHINode *, 4> PhiNodes;
322 SmallPtrSet<Instruction *, 4> Defs;
323 SmallPtrSet<Instruction *, 4> Uses;
324 SmallPtrSet<Instruction *, 4> Visited;
325
326 Worklist.push_back(cast<Instruction>(I));
327 while (!Worklist.empty()) {
328 Instruction *II = Worklist.pop_back_val();
329
330 if (!Visited.insert(II).second)
331 continue;
332
333 if (!shouldReplace(II->getType()))
334 continue;
335
336 if (!isCoercionProfitable(II))
337 continue;
338
339 if (PHINode *Phi = dyn_cast<PHINode>(II)) {
340 PhiNodes.insert(Phi);
341 // Collect all the incoming values of problematic PHI nodes.
342 for (Value *V : Phi->incoming_values()) {
343 // Repeat the collection process for newly found PHI nodes.
344 if (PHINode *OpPhi = dyn_cast<PHINode>(V)) {
345 if (!PhiNodes.count(OpPhi) && !Visited.count(OpPhi))
346 Worklist.push_back(OpPhi);
347 continue;
348 }
349
      // NOTE(review): line "350" is missing from this dump -- presumably
      // "Instruction *IncInst = dyn_cast<Instruction>(V);".
351 // Other incoming value types (e.g. vector literals) are unhandled
352 if (!IncInst && !isa<ConstantAggregateZero>(V))
353 return false;
354
355 // Collect all other incoming values for coercion.
356 if (IncInst)
357 Defs.insert(IncInst);
358 }
359 }
360
361 // Collect all relevant uses.
362 for (User *V : II->users()) {
363 // Repeat the collection process for problematic PHI nodes.
364 if (PHINode *OpPhi = dyn_cast<PHINode>(V)) {
365 if (!PhiNodes.count(OpPhi) && !Visited.count(OpPhi))
366 Worklist.push_back(OpPhi);
367 continue;
368 }
369
370 Instruction *UseInst = cast<Instruction>(V);
371 // Collect all uses of PHINodes and any use the crosses BB boundaries.
372 if (UseInst->getParent() != II->getParent() || isa<PHINode>(II)) {
373 Uses.insert(UseInst);
374 if (!isa<PHINode>(II))
375 Defs.insert(II);
376 }
377 }
378 }
379
380 // Coerce and track the defs.
381 for (Instruction *D : Defs) {
382 if (!ValMap.contains(D)) {
383 BasicBlock::iterator InsertPt = std::next(D->getIterator());
384 Value *ConvertVal = convertToOptType(D, InsertPt);
385 assert(ConvertVal);
386 ValMap[D] = ConvertVal;
387 }
388 }
389
390 // Construct new-typed PHI nodes.
391 for (PHINode *Phi : PhiNodes) {
392 ValMap[Phi] = PHINode::Create(calculateConvertType(Phi->getType()),
393 Phi->getNumIncomingValues(),
394 Phi->getName() + ".tc", Phi->getIterator());
395 }
396
397 // Connect all the PHI nodes with their new incoming values.
398 for (PHINode *Phi : PhiNodes) {
399 PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
400 bool MissingIncVal = false;
401 for (int I = 0, E = Phi->getNumIncomingValues(); I < E; I++) {
402 Value *IncVal = Phi->getIncomingValue(I);
403 if (isa<ConstantAggregateZero>(IncVal)) {
404 Type *NewType = calculateConvertType(Phi->getType());
405 NewPhi->addIncoming(ConstantInt::get(NewType, 0, false),
406 Phi->getIncomingBlock(I));
407 } else if (Value *Val = ValMap.lookup(IncVal))
408 NewPhi->addIncoming(Val, Phi->getIncomingBlock(I));
409 else
410 MissingIncVal = true;
411 }
412 if (MissingIncVal) {
413 Value *DeadVal = ValMap[Phi];
414 // The coercion chain of the PHI is broken. Delete the Phi
415 // from the ValMap and any connected / user Phis.
416 SmallVector<Value *, 4> PHIWorklist;
417 SmallPtrSet<Value *, 4> VisitedPhis;
418 PHIWorklist.push_back(DeadVal);
419 while (!PHIWorklist.empty()) {
420 Value *NextDeadValue = PHIWorklist.pop_back_val();
421 VisitedPhis.insert(NextDeadValue);
422 auto OriginalPhi =
423 llvm::find_if(PhiNodes, [this, &NextDeadValue](PHINode *CandPhi) {
424 return ValMap[CandPhi] == NextDeadValue;
425 });
426 // This PHI may have already been removed from maps when
427 // unwinding a previous Phi
428 if (OriginalPhi != PhiNodes.end())
429 ValMap.erase(*OriginalPhi);
430
431 DeadInsts.emplace_back(cast<Instruction>(NextDeadValue));
432
433 for (User *U : NextDeadValue->users()) {
434 if (!VisitedPhis.contains(cast<PHINode>(U)))
435 PHIWorklist.push_back(U);
436 }
437 }
438 } else {
      // Original PHI is fully replaced; queue it for deletion.
439 DeadInsts.emplace_back(cast<Instruction>(Phi));
440 }
441 }
442 // Coerce back to the original type and replace the uses.
443 for (Instruction *U : Uses) {
444 // Replace all converted operands for a use.
445 for (auto [OpIdx, Op] : enumerate(U->operands())) {
446 if (Value *Val = ValMap.lookup(Op)) {
447 Value *NewVal = nullptr;
        // Reuse a conversion already materialized in this block, if any.
448 if (BBUseValMap.contains(U->getParent()) &&
449 BBUseValMap[U->getParent()].contains(Val))
450 NewVal = BBUseValMap[U->getParent()][Val];
451 else {
452 BasicBlock::iterator InsertPt = U->getParent()->getFirstNonPHIIt();
453 // We may pick up ops that were previously converted for users in
454 // other blocks. If there is an originally typed definition of the Op
455 // already in this block, simply reuse it.
        // NOTE(review): line "456" is missing from this dump -- presumably
        // the opening of this condition (a ValMap/Op check &&).
457 U->getParent() == cast<Instruction>(Op)->getParent()) {
458 NewVal = Op;
459 } else {
460 NewVal =
461 convertFromOptType(Op->getType(), cast<Instruction>(ValMap[Op]),
462 InsertPt, U->getParent());
463 BBUseValMap[U->getParent()][ValMap[Op]] = NewVal;
464 }
465 }
466 assert(NewVal);
467 U->setOperand(OpIdx, NewVal);
468 }
469 }
470 }
471
472 return true;
473}
474
// Legality filter for load widening: only simple, uniform, naturally aligned,
// sub-dword, non-aggregate loads from the constant address space qualify.
// NOTE(review): line "479" is missing from this dump -- presumably
// "AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT)".
475bool AMDGPULateCodeGenPrepare::canWidenScalarExtLoad(LoadInst &LI) const {
476 unsigned AS = LI.getPointerAddressSpace();
477 // Skip non-constant address space.
478 if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
480 return false;
481 // Skip non-simple loads.
482 if (!LI.isSimple())
483 return false;
484 Type *Ty = LI.getType();
485 // Skip aggregate types.
486 if (Ty->isAggregateType())
487 return false;
488 unsigned TySize = DL.getTypeStoreSize(Ty);
489 // Only handle sub-DWORD loads.
490 if (TySize >= 4)
491 return false;
492 // That load must be at least naturally aligned.
493 if (LI.getAlign() < DL.getABITypeAlign(Ty))
494 return false;
495 // It should be uniform, i.e. a scalar load.
496 return UA.isUniform(&LI);
497}
498
// Widen a qualifying sub-dword constant load into an aligned i32 load of the
// containing dword, then shift/truncate to recover the original value.
// Returns true if the IR was changed.
499bool AMDGPULateCodeGenPrepare::visitLoadInst(LoadInst &LI) {
500 if (!WidenLoads)
501 return false;
502
503 // Skip if that load is already aligned on DWORD at least as it's handled in
504 // SDAG.
505 if (LI.getAlign() >= 4)
506 return false;
507
508 if (!canWidenScalarExtLoad(LI))
509 return false;
510
511 int64_t Offset = 0;
  // NOTE(review): line "513" is missing from this dump -- presumably
  // "GetPointerBaseWithConstantOffset(LI.getPointerOperand(), Offset, DL);".
512 auto *Base =
514 // If that base is not DWORD aligned, it's not safe to perform the following
515 // transforms.
516 if (!isDWORDAligned(Base))
517 return false;
518
  // Byte offset of the load within its containing dword.
519 int64_t Adjust = Offset & 0x3;
520 if (Adjust == 0) {
521 // With a zero adjust, the original alignment could be promoted with a
522 // better one.
523 LI.setAlignment(Align(4));
524 return true;
525 }
526
527 IRBuilder<> IRB(&LI);
528 IRB.SetCurrentDebugLocation(LI.getDebugLoc());
529
530 unsigned LdBits = DL.getTypeStoreSizeInBits(LI.getType());
531 auto *IntNTy = Type::getIntNTy(LI.getContext(), LdBits);
532
  // Point at the dword-aligned address containing the original load.
533 auto *NewPtr = IRB.CreateConstGEP1_64(
534 IRB.getInt8Ty(),
535 IRB.CreateAddrSpaceCast(Base, LI.getPointerOperand()->getType()),
536 Offset - Adjust);
537
538 LoadInst *NewLd = IRB.CreateAlignedLoad(IRB.getInt32Ty(), NewPtr, Align(4));
539 NewLd->copyMetadata(LI);
  // Range metadata described the narrow value; it is invalid for the wide load.
540 NewLd->setMetadata(LLVMContext::MD_range, nullptr);
541
  // Shift the wanted bytes down, then narrow/bitcast to the original type.
542 unsigned ShAmt = Adjust * 8;
543 Value *NewVal = IRB.CreateBitCast(
544 IRB.CreateTrunc(IRB.CreateLShr(NewLd, ShAmt),
545 DL.typeSizeEqualsStoreSize(LI.getType()) ? IntNTy
546 : LI.getType()),
547 LI.getType());
548 LI.replaceAllUsesWith(NewVal);
549 DeadInsts.emplace_back(&LI);
550
551 return true;
552}
553
// New pass manager entry point.
// NOTE(review): line "555" (the run(Function&, FunctionAnalysisManager&)
// signature) and lines "564"-"565" (constructing PA / preserving the CFG
// analysis set) are missing from this dump -- verify against upstream.
554PreservedAnalyses
556 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
557 AssumptionCache &AC = FAM.getResult<AssumptionAnalysis>(F);
558 UniformityInfo &UI = FAM.getResult<UniformityInfoAnalysis>(F);
559
560 bool Changed = AMDGPULateCodeGenPrepare(F, ST, &AC, UI).run();
561
562 if (!Changed)
563 return PreservedAnalyses::all();
566 return PA;
567}
568
// Legacy pass manager wrapper.
// NOTE(review): the class declaration line ("569"), the constructor ("573"),
// and the analysis requirements inside getAnalysisUsage ("580"-"582") are
// missing from this dump -- verify against upstream.
570public:
571 static char ID;
572
574
575 StringRef getPassName() const override {
576 return "AMDGPU IR late optimizations";
577 }
578
579 void getAnalysisUsage(AnalysisUsage &AU) const override {
583 // Invalidates UniformityInfo
584 AU.setPreservesCFG();
585 }
586
587 bool runOnFunction(Function &F) override;
588};
589
// Legacy pass body: gathers subtarget + analyses, then delegates to the
// shared AMDGPULateCodeGenPrepare::run().
// NOTE(review): the function signature line ("590") and line "594" (the
// TargetPassConfig lookup feeding TPC) are missing from this dump.
591 if (skipFunction(F))
592 return false;
593
595 const TargetMachine &TM = TPC.getTM<TargetMachine>();
596 const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
597
598 AssumptionCache &AC =
599 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
600 UniformityInfo &UI =
601 getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
602
603 return AMDGPULateCodeGenPrepare(F, ST, &AC, UI).run();
604}
605
607 "AMDGPU IR late optimizations", false, false)
612 "AMDGPU IR late optimizations", false, false)
613
615
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > WidenLoads("amdgpu-late-codegenprepare-widen-constant-loads", cl::desc("Widen sub-dword constant address space loads in " "AMDGPULateCodeGenPrepare"), cl::ReallyHidden, cl::init(true))
The AMDGPU TargetMachine interface definition for hw codegen targets.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static bool runOnFunction(Function &F, bool PostInlining)
#define DEBUG_TYPE
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
Machine Check Debug Module
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
Remove Loads Into Fake Uses
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
Target-Independent Code Generator Pass Configuration Options pass.
LLVM IR instance of the generic uniformity analysis.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
StringRef getPassName() const override
getPassName - Return a nice clean name for a pass.
PreservedAnalyses run(Function &, FunctionAnalysisManager &)
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
bool erase(const KeyT &Val)
Definition DenseMap.h:330
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
bool isUniform(ConstValueRefT V) const
Whether V is uniform/non-divergent.
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
void setAlignment(Align Align)
Value * getPointerOperand()
bool isSimple() const
Align getAlign() const
Return the alignment of the access that is being performed.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
Primary interface to the complete machine description for the target machine.
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
Target-Independent Code Generator Pass Configuration Options.
TMC & getTM() const
Get the right type of TargetMachine for this target.
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
Analysis pass which computes UniformityInfo.
Legacy analysis pass which computes a CycleInfo.
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
const ParentTy * getParent() const
Definition ilist_node.h:34
Changed
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2503
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow instructions that are not...
Definition Local.cpp:548
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1779
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
FunctionPass * createAMDGPULateCodeGenPrepareLegacyPass()
DenseMap< const Value *, Value * > ValueToValueMap
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:242