LLVM 18.0.0git
RISCVGatherScatterLowering.cpp
//===- RISCVGatherScatterLowering.cpp - Gather/Scatter lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass custom lowers llvm.masked.gather and llvm.masked.scatter
// instructions to RISC-V intrinsics.
//
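// For example (illustrative IR, names and operand types simplified), a gather
// whose pointer operand is a GEP over a strided index vector:
//
//   %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 0, i64 4, i64 8, i64 12>
//   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4,
//                                                      <4 x i1> %mask, <4 x i32> %passthru)
//
// is rewritten into a call to @llvm.riscv.masked.strided.load with the scalar
// base pointer %base and a scalar byte stride of 16.
//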
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "riscv-gather-scatter-lowering"

namespace {

class RISCVGatherScatterLowering : public FunctionPass {
  const RISCVSubtarget *ST = nullptr;
  const RISCVTargetLowering *TLI = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL = nullptr;

  SmallVector<WeakTrackingVH> MaybeDeadPHIs;

  // Cache of the BasePtr and Stride determined from this GEP. When a GEP is
  // used by multiple gathers/scatters, this allows us to reuse the scalar
  // instructions we created for the first gather/scatter for the others.
  DenseMap<GetElementPtrInst *, std::pair<Value *, Value *>> StridedAddrs;

public:
  static char ID; // Pass identification, replacement for typeid

  RISCVGatherScatterLowering() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

  StringRef getPassName() const override {
    return "RISC-V gather/scatter lowering";
  }

private:
  bool tryCreateStridedLoadStore(IntrinsicInst *II, Type *DataType, Value *Ptr,
                                 Value *AlignOp);

  std::pair<Value *, Value *> determineBaseAndStride(Instruction *Ptr,
                                                     IRBuilderBase &Builder);

  bool matchStridedRecurrence(Value *Index, Loop *L, Value *&Stride,
                              PHINode *&BasePtr, BinaryOperator *&Inc,
                              IRBuilderBase &Builder);
};

} // end anonymous namespace

char RISCVGatherScatterLowering::ID = 0;

INITIALIZE_PASS(RISCVGatherScatterLowering, DEBUG_TYPE,
                "RISC-V gather/scatter lowering pass", false, false)

FunctionPass *llvm::createRISCVGatherScatterLoweringPass() {
  return new RISCVGatherScatterLowering();
}

// TODO: Should we consider the mask when looking for a stride?
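// Matches a constant vector whose elements form an arithmetic sequence, e.g.
// <i64 10, i64 12, i64 14, i64 16> gives start 10 and stride 2. Returns
// {nullptr, nullptr} if the elements are not evenly spaced.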
static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
  if (!isa<FixedVectorType>(StartC->getType()))
    return std::make_pair(nullptr, nullptr);

  unsigned NumElts = cast<FixedVectorType>(StartC->getType())->getNumElements();

  // Check that the start value is a strided constant.
  auto *StartVal =
      dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement((unsigned)0));
  if (!StartVal)
    return std::make_pair(nullptr, nullptr);
  APInt StrideVal(StartVal->getValue().getBitWidth(), 0);
  ConstantInt *Prev = StartVal;
  for (unsigned i = 1; i != NumElts; ++i) {
    auto *C = dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement(i));
    if (!C)
      return std::make_pair(nullptr, nullptr);

    APInt LocalStride = C->getValue() - Prev->getValue();
    if (i == 1)
      StrideVal = LocalStride;
    else if (StrideVal != LocalStride)
      return std::make_pair(nullptr, nullptr);

    Prev = C;
  }

  Value *Stride = ConstantInt::get(StartVal->getType(), StrideVal);

  return std::make_pair(StartVal, Stride);
}

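// Try to express Start as a scalar start value plus a per-lane stride, e.g.
// stepvector * splat(8) + splat(3) gives start 3 and stride 8. Handles strided
// constants, stepvector, and add/mul/shl of a matchable value with a splat.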
static std::pair<Value *, Value *> matchStridedStart(Value *Start,
                                                     IRBuilderBase &Builder) {
  // Base case, start is a strided constant.
  auto *StartC = dyn_cast<Constant>(Start);
  if (StartC)
    return matchStridedConstant(StartC);

  // Base case, start is a stepvector.
  if (match(Start, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
    auto *Ty = Start->getType()->getScalarType();
    return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
  }

  // Not a constant, maybe it's a strided constant with a splat added or
  // multiplied.
  auto *BO = dyn_cast<BinaryOperator>(Start);
  if (!BO || (BO->getOpcode() != Instruction::Add &&
              BO->getOpcode() != Instruction::Shl &&
              BO->getOpcode() != Instruction::Mul))
    return std::make_pair(nullptr, nullptr);

  // Look for an operand that is splatted.
  unsigned OtherIndex = 0;
  Value *Splat = getSplatValue(BO->getOperand(1));
  if (!Splat && Instruction::isCommutative(BO->getOpcode())) {
    Splat = getSplatValue(BO->getOperand(0));
    OtherIndex = 1;
  }
  if (!Splat)
    return std::make_pair(nullptr, nullptr);

  Value *Stride;
  std::tie(Start, Stride) = matchStridedStart(BO->getOperand(OtherIndex),
                                              Builder);
  if (!Start)
    return std::make_pair(nullptr, nullptr);

  Builder.SetInsertPoint(BO);
  Builder.SetCurrentDebugLocation(DebugLoc());
  // Add the splat value to the start or multiply the start and stride by the
  // splat.
  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case Instruction::Add:
    Start = Builder.CreateAdd(Start, Splat);
    break;
  case Instruction::Mul:
    Start = Builder.CreateMul(Start, Splat);
    Stride = Builder.CreateMul(Stride, Splat);
    break;
  case Instruction::Shl:
    Start = Builder.CreateShl(Start, Splat);
    Stride = Builder.CreateShl(Stride, Splat);
    break;
  }

  return std::make_pair(Start, Stride);
}

// Recursively walk back through the use-def chain until we find a Phi with a
// strided start value. Build and update a scalar recurrence as we unwind the
// recursion. We also update the Stride as we unwind. Our goal is to move all
// of the arithmetic out of the loop.
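//
// For example (illustrative IR, constants abbreviated), a vector induction
// variable such as
//   %vec.iv      = phi <4 x i64> [ <0, 1, 2, 3>, %preheader ], [ %vec.iv.next, %loop ]
//   %vec.iv.next = add <4 x i64> %vec.iv, <4, 4, 4, 4>
// becomes a scalar recurrence that starts at 0 and is incremented by 4 each
// iteration, and the returned Stride is 1 (the spacing between adjacent lanes).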
bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
                                                        Value *&Stride,
                                                        PHINode *&BasePtr,
                                                        BinaryOperator *&Inc,
                                                        IRBuilderBase &Builder) {
  // Our base case is a Phi.
  if (auto *Phi = dyn_cast<PHINode>(Index)) {
    // A phi node we want to perform this function on should be from the
    // loop header.
    if (Phi->getParent() != L->getHeader())
      return false;

    Value *Step, *Start;
    if (!matchSimpleRecurrence(Phi, Inc, Start, Step) ||
        Inc->getOpcode() != Instruction::Add)
      return false;
    assert(Phi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
    unsigned IncrementingBlock = Phi->getIncomingValue(0) == Inc ? 0 : 1;
    assert(Phi->getIncomingValue(IncrementingBlock) == Inc &&
           "Expected one operand of phi to be Inc");

    // Only proceed if the step is loop invariant.
    if (!L->isLoopInvariant(Step))
      return false;

    // Step should be a splat.
    Step = getSplatValue(Step);
    if (!Step)
      return false;

    std::tie(Start, Stride) = matchStridedStart(Start, Builder);
    if (!Start)
      return false;
    assert(Stride != nullptr);

    // Build scalar phi and increment.
    BasePtr =
        PHINode::Create(Start->getType(), 2, Phi->getName() + ".scalar", Phi);
    Inc = BinaryOperator::CreateAdd(BasePtr, Step, Inc->getName() + ".scalar",
                                    Inc);
    BasePtr->addIncoming(Start, Phi->getIncomingBlock(1 - IncrementingBlock));
    BasePtr->addIncoming(Inc, Phi->getIncomingBlock(IncrementingBlock));

    // Note that this Phi might be eligible for removal.
    MaybeDeadPHIs.push_back(Phi);
    return true;
  }

  // Otherwise look for binary operator.
  auto *BO = dyn_cast<BinaryOperator>(Index);
  if (!BO)
    return false;

  switch (BO->getOpcode()) {
  default:
    return false;
  case Instruction::Or:
    // We need to be able to treat Or as Add.
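    // (When the operands share no set bits, e.g. (X << 2) | 1, the Or adds
    // the same value to every lane that an Add would.)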
    if (!haveNoCommonBitsSet(BO->getOperand(0), BO->getOperand(1), *DL))
      return false;
    break;
  case Instruction::Add:
    break;
  case Instruction::Shl:
    break;
  case Instruction::Mul:
    break;
  }

  // We should have one operand in the loop and one splat.
  Value *OtherOp;
  if (isa<Instruction>(BO->getOperand(0)) &&
      L->contains(cast<Instruction>(BO->getOperand(0)))) {
    Index = cast<Instruction>(BO->getOperand(0));
    OtherOp = BO->getOperand(1);
  } else if (isa<Instruction>(BO->getOperand(1)) &&
             L->contains(cast<Instruction>(BO->getOperand(1))) &&
             Instruction::isCommutative(BO->getOpcode())) {
    Index = cast<Instruction>(BO->getOperand(1));
    OtherOp = BO->getOperand(0);
  } else {
    return false;
  }

  // Make sure other op is loop invariant.
  if (!L->isLoopInvariant(OtherOp))
    return false;

  // Make sure we have a splat.
  Value *SplatOp = getSplatValue(OtherOp);
  if (!SplatOp)
    return false;

  // Recurse up the use-def chain.
  if (!matchStridedRecurrence(Index, L, Stride, BasePtr, Inc, Builder))
    return false;

  // Locate the Step and Start values from the recurrence.
  unsigned StepIndex = Inc->getOperand(0) == BasePtr ? 1 : 0;
  unsigned StartBlock = BasePtr->getOperand(0) == Inc ? 1 : 0;
  Value *Step = Inc->getOperand(StepIndex);
  Value *Start = BasePtr->getOperand(StartBlock);

  // We need to adjust the start value in the preheader.
  Builder.SetInsertPoint(
      BasePtr->getIncomingBlock(StartBlock)->getTerminator());
  Builder.SetCurrentDebugLocation(DebugLoc());

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case Instruction::Add:
  case Instruction::Or: {
    // An add only affects the start value. It's ok to do this for Or because
    // we already checked that there are no common set bits.
    Start = Builder.CreateAdd(Start, SplatOp, "start");
    break;
  }
  case Instruction::Mul: {
    Start = Builder.CreateMul(Start, SplatOp, "start");
    Step = Builder.CreateMul(Step, SplatOp, "step");
    Stride = Builder.CreateMul(Stride, SplatOp, "stride");
    break;
  }
  case Instruction::Shl: {
    Start = Builder.CreateShl(Start, SplatOp, "start");
    Step = Builder.CreateShl(Step, SplatOp, "step");
    Stride = Builder.CreateShl(Stride, SplatOp, "stride");
    break;
  }
  }

  Inc->setOperand(StepIndex, Step);
  BasePtr->setIncomingValue(StartBlock, Start);
  return true;
}

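// Given the pointer operand of a gather/scatter, try to express it as a single
// scalar base pointer plus a scalar stride, inserting whatever scalar
// arithmetic that requires. Returns {nullptr, nullptr} on failure; successful
// results are cached in StridedAddrs, keyed by the GEP.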
std::pair<Value *, Value *>
RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
                                                   IRBuilderBase &Builder) {

  // A gather/scatter of a splat is a zero strided load/store.
  if (auto *BasePtr = getSplatValue(Ptr)) {
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    return std::make_pair(BasePtr, ConstantInt::get(IntPtrTy, 0));
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return std::make_pair(nullptr, nullptr);

  auto I = StridedAddrs.find(GEP);
  if (I != StridedAddrs.end())
    return I->second;

  SmallVector<Value *, 2> Ops(GEP->operands());

  // Base pointer needs to be a scalar.
  Value *ScalarBase = Ops[0];
  if (ScalarBase->getType()->isVectorTy()) {
    ScalarBase = getSplatValue(ScalarBase);
    if (!ScalarBase)
      return std::make_pair(nullptr, nullptr);
  }

  std::optional<unsigned> VecOperand;
  unsigned TypeScale = 0;

  // Look for a vector operand and scale.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    if (!Ops[i]->getType()->isVectorTy())
      continue;

    if (VecOperand)
      return std::make_pair(nullptr, nullptr);

    VecOperand = i;

    TypeSize TS = DL->getTypeAllocSize(GTI.getIndexedType());
    if (TS.isScalable())
      return std::make_pair(nullptr, nullptr);

    TypeScale = TS.getFixedValue();
  }

  // We need to find a vector index to simplify.
  if (!VecOperand)
    return std::make_pair(nullptr, nullptr);

  // We can't extract the stride if the arithmetic is done at a different size
  // than the pointer type. Adding the stride later may not wrap correctly.
  // Technically we could handle wider indices, but I don't expect that in
  // practice. Handle one special case here - constants. This simplifies
  // writing test cases.
  Value *VecIndex = Ops[*VecOperand];
  Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
  if (VecIndex->getType() != VecIntPtrTy) {
    auto *VecIndexC = dyn_cast<Constant>(VecIndex);
    if (!VecIndexC)
      return std::make_pair(nullptr, nullptr);
    if (VecIndex->getType()->getScalarSizeInBits() >
        VecIntPtrTy->getScalarSizeInBits())
      VecIndex = ConstantFoldCastInstruction(Instruction::Trunc, VecIndexC,
                                             VecIntPtrTy);
    else
      VecIndex = ConstantFoldCastInstruction(Instruction::SExt, VecIndexC,
                                             VecIntPtrTy);
  }

  // Handle the non-recursive case. This is what we see if the vectorizer
  // decides to use a scalar IV + vid on demand instead of a vector IV.
  auto [Start, Stride] = matchStridedStart(VecIndex, Builder);
  if (Start) {
    assert(Stride);
    Builder.SetInsertPoint(GEP);

    // Replace the vector index with the scalar start and build a scalar GEP.
    Ops[*VecOperand] = Start;
    Type *SourceTy = GEP->getSourceElementType();
    Value *BasePtr =
        Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());

    // Convert stride to pointer size if needed.
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    assert(Stride->getType() == IntPtrTy && "Unexpected type");

    // Scale the stride by the size of the indexed type.
    if (TypeScale != 1)
      Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

    auto P = std::make_pair(BasePtr, Stride);
    StridedAddrs[GEP] = P;
    return P;
  }

  // Make sure we're in a loop and that it has a preheader and a single latch.
  Loop *L = LI->getLoopFor(GEP->getParent());
  if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
    return std::make_pair(nullptr, nullptr);

  BinaryOperator *Inc;
  PHINode *BasePhi;
  if (!matchStridedRecurrence(VecIndex, L, Stride, BasePhi, Inc, Builder))
    return std::make_pair(nullptr, nullptr);

  assert(BasePhi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
  unsigned IncrementingBlock = BasePhi->getOperand(0) == Inc ? 0 : 1;
  assert(BasePhi->getIncomingValue(IncrementingBlock) == Inc &&
         "Expected one operand of phi to be Inc");

  Builder.SetInsertPoint(GEP);

  // Replace the vector index with the scalar phi and build a scalar GEP.
  Ops[*VecOperand] = BasePhi;
  Type *SourceTy = GEP->getSourceElementType();
  Value *BasePtr =
      Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());

  // Final adjustments to stride should go in the start block.
  Builder.SetInsertPoint(
      BasePhi->getIncomingBlock(1 - IncrementingBlock)->getTerminator());

  // Convert stride to pointer size if needed.
  Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
  assert(Stride->getType() == IntPtrTy && "Unexpected type");

  // Scale the stride by the size of the indexed type.
  if (TypeScale != 1)
    Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

  auto P = std::make_pair(BasePtr, Stride);
  StridedAddrs[GEP] = P;
  return P;
}

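// Rewrite a masked gather/scatter whose pointer can be expressed as a scalar
// base plus a scalar stride into a riscv.masked.strided.load/store intrinsic,
// provided the data type and alignment are legal for the target. Returns true
// if the original intrinsic was replaced.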
bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II,
                                                           Type *DataType,
                                                           Value *Ptr,
                                                           Value *AlignOp) {
  // Make sure the operation will be supported by the backend.
  MaybeAlign MA = cast<ConstantInt>(AlignOp)->getMaybeAlignValue();
  EVT DataTypeVT = TLI->getValueType(*DL, DataType);
  if (!MA || !TLI->isLegalStridedLoadStore(DataTypeVT, *MA))
    return false;

  // FIXME: Let the backend type legalize by splitting/widening?
  if (!TLI->isTypeLegal(DataTypeVT))
    return false;

  // Pointer should be an instruction.
  auto *PtrI = dyn_cast<Instruction>(Ptr);
  if (!PtrI)
    return false;

  LLVMContext &Ctx = PtrI->getContext();
  IRBuilder<InstSimplifyFolder> Builder(Ctx, *DL);
  Builder.SetInsertPoint(PtrI);

  Value *BasePtr, *Stride;
  std::tie(BasePtr, Stride) = determineBaseAndStride(PtrI, Builder);
  if (!BasePtr)
    return false;
  assert(Stride != nullptr);

  Builder.SetInsertPoint(II);

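  // masked.gather operands are (ptrs, align, mask, passthru) and
  // masked.scatter operands are (value, ptrs, align, mask); the strided
  // intrinsics take (passthru-or-value, base, stride, mask).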
  CallInst *Call;
  if (II->getIntrinsicID() == Intrinsic::masked_gather)
    Call = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_strided_load,
        {DataType, BasePtr->getType(), Stride->getType()},
        {II->getArgOperand(3), BasePtr, Stride, II->getArgOperand(2)});
  else
    Call = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_strided_store,
        {DataType, BasePtr->getType(), Stride->getType()},
        {II->getArgOperand(0), BasePtr, Stride, II->getArgOperand(3)});

  Call->takeName(II);
  II->replaceAllUsesWith(Call);
  II->eraseFromParent();

  if (PtrI->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(PtrI);

  return true;
}

bool RISCVGatherScatterLowering::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<RISCVTargetMachine>();
  ST = &TM.getSubtarget<RISCVSubtarget>(F);
  if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors())
    return false;

  TLI = ST->getTargetLowering();
  DL = &F.getParent()->getDataLayout();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  StridedAddrs.clear();

  SmallVector<IntrinsicInst *, 4> Gathers;
  SmallVector<IntrinsicInst *, 4> Scatters;

  bool Changed = false;

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      if (II && II->getIntrinsicID() == Intrinsic::masked_gather) {
        Gathers.push_back(II);
      } else if (II && II->getIntrinsicID() == Intrinsic::masked_scatter) {
        Scatters.push_back(II);
      }
    }
  }

  // Rewrite gather/scatter to form strided load/store if possible.
  for (auto *II : Gathers)
    Changed |= tryCreateStridedLoadStore(
        II, II->getType(), II->getArgOperand(0), II->getArgOperand(1));
  for (auto *II : Scatters)
    Changed |=
        tryCreateStridedLoadStore(II, II->getArgOperand(0)->getType(),
                                  II->getArgOperand(1), II->getArgOperand(2));

  // Remove any dead phis.
  while (!MaybeDeadPHIs.empty()) {
    if (auto *Phi = dyn_cast_or_null<PHINode>(MaybeDeadPHIs.pop_back_val()))
      Changed |= RecursivelyDeleteDeadPHINode(Phi);
  }

  return Changed;
}