LLVM 22.0.0git
RISCVCodeGenPrepare.cpp
Go to the documentation of this file.
//===----- RISCVCodeGenPrepare.cpp ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a RISC-V specific version of CodeGenPrepare.
// It munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach.
//
//===----------------------------------------------------------------------===//
15
#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/Local.h"
29
30using namespace llvm;
31
32#define DEBUG_TYPE "riscv-codegenprepare"
33#define PASS_NAME "RISC-V CodeGenPrepare"
34
35namespace {
36class RISCVCodeGenPrepare : public InstVisitor<RISCVCodeGenPrepare, bool> {
37 Function &F;
38 const DataLayout *DL;
39 const DominatorTree *DT;
40 const RISCVSubtarget *ST;
41
42public:
43 RISCVCodeGenPrepare(Function &F, const DominatorTree *DT,
44 const RISCVSubtarget *ST)
45 : F(F), DL(&F.getDataLayout()), DT(DT), ST(ST) {}
46 bool run();
47 bool visitInstruction(Instruction &I) { return false; }
48 bool visitAnd(BinaryOperator &BO);
49 bool visitIntrinsicInst(IntrinsicInst &I);
50 bool expandVPStrideLoad(IntrinsicInst &I);
51 bool widenVPMerge(IntrinsicInst &I);
52};
53} // namespace
54
55namespace {
56class RISCVCodeGenPrepareLegacyPass : public FunctionPass {
57public:
58 static char ID;
59
60 RISCVCodeGenPrepareLegacyPass() : FunctionPass(ID) {}
61
62 bool runOnFunction(Function &F) override;
63 StringRef getPassName() const override { return PASS_NAME; }
64
65 void getAnalysisUsage(AnalysisUsage &AU) const override {
66 AU.setPreservesCFG();
67 AU.addRequired<DominatorTreeWrapperPass>();
68 AU.addRequired<TargetPassConfig>();
69 }
70};
71} // namespace
72
73// Try to optimize (i64 (and (zext/sext (i32 X), C1))) if C1 has bit 31 set,
74// but bits 63:32 are zero. If we know that bit 31 of X is 0, we can fill
75// the upper 32 bits with ones.
76bool RISCVCodeGenPrepare::visitAnd(BinaryOperator &BO) {
77 if (!ST->is64Bit())
78 return false;
79
80 if (!BO.getType()->isIntegerTy(64))
81 return false;
82
83 using namespace PatternMatch;
84
85 // Left hand side should be a zext nneg.
86 Value *LHSSrc;
87 if (!match(BO.getOperand(0), m_NNegZExt(m_Value(LHSSrc))))
88 return false;
89
90 if (!LHSSrc->getType()->isIntegerTy(32))
91 return false;
92
93 // Right hand side should be a constant.
94 Value *RHS = BO.getOperand(1);
95
96 auto *CI = dyn_cast<ConstantInt>(RHS);
97 if (!CI)
98 return false;
99 uint64_t C = CI->getZExtValue();
100
101 // Look for constants that fit in 32 bits but not simm12, and can be made
102 // into simm12 by sign extending bit 31. This will allow use of ANDI.
103 // TODO: Is worth making simm32?
105 return false;
106
107 // Sign extend the constant and replace the And operand.
109 BO.setOperand(1, ConstantInt::get(RHS->getType(), C));
110
111 return true;
112}
113
114// With EVL tail folding, an AnyOf reduction will generate an i1 vp.merge like
115// follows:
116//
117// loop:
118// %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
119// %cmp = icmp ...
120// %rec = call <vscale x 4 x i1> @llvm.vp.merge(%cmp, i1 true, %phi, %evl)
121// ...
122// middle:
123// %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
124//
125// However RVV doesn't have any tail undisturbed mask instructions and so we
126// need a convoluted sequence of mask instructions to lower the i1 vp.merge: see
127// llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll.
128//
129// To avoid that this widens the i1 vp.merge to an i8 vp.merge, which will
130// generate a single vmerge.vim:
131//
132// loop:
133// %phi = phi <vscale x 4 x i8> [ zeroinitializer, %entry ], [ %rec, %loop ]
134// %cmp = icmp ...
135// %rec = call <vscale x 4 x i8> @llvm.vp.merge(%cmp, i8 true, %phi, %evl)
136// %trunc = trunc <vscale x 4 x i8> %rec to <vscale x 4 x i1>
137// ...
138// middle:
139// %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
140//
141// The trunc will normally be sunk outside of the loop, but even if there are
142// users inside the loop it is still profitable.
143bool RISCVCodeGenPrepare::widenVPMerge(IntrinsicInst &II) {
144 if (!II.getType()->getScalarType()->isIntegerTy(1))
145 return false;
146
147 Value *Mask, *True, *PhiV, *EVL;
148 using namespace PatternMatch;
149 if (!match(&II,
151 m_Value(PhiV), m_Value(EVL))))
152 return false;
153
154 auto *Phi = dyn_cast<PHINode>(PhiV);
155 if (!Phi || !Phi->hasOneUse() || Phi->getNumIncomingValues() != 2 ||
156 !match(Phi->getIncomingValue(0), m_Zero()) ||
157 Phi->getIncomingValue(1) != &II)
158 return false;
159
160 Type *WideTy =
161 VectorType::get(IntegerType::getInt8Ty(II.getContext()),
162 cast<VectorType>(II.getType())->getElementCount());
163
164 IRBuilder<> Builder(Phi);
165 PHINode *WidePhi = Builder.CreatePHI(WideTy, 2);
167 Phi->getIncomingBlock(0));
168 Builder.SetInsertPoint(&II);
169 Value *WideTrue = Builder.CreateZExt(True, WideTy);
170 Value *WideMerge = Builder.CreateIntrinsic(Intrinsic::vp_merge, {WideTy},
171 {Mask, WideTrue, WidePhi, EVL});
172 WidePhi->addIncoming(WideMerge, Phi->getIncomingBlock(1));
173 Value *Trunc = Builder.CreateTrunc(WideMerge, II.getType());
174
175 II.replaceAllUsesWith(Trunc);
176
177 // Break the cycle and delete the old chain.
178 Phi->setIncomingValue(1, Phi->getIncomingValue(0));
180
181 return true;
182}
183
184// LLVM vector reduction intrinsics return a scalar result, but on RISC-V vector
185// reduction instructions write the result in the first element of a vector
186// register. So when a reduction in a loop uses a scalar phi, we end up with
187// unnecessary scalar moves:
188//
189// loop:
190// vfmv.s.f v10, fa0
191// vfredosum.vs v8, v8, v10
192// vfmv.f.s fa0, v8
193//
194// This mainly affects ordered fadd reductions and VP reductions that have a
195// scalar start value, since other types of reduction typically use element-wise
196// vectorisation in the loop body. This tries to vectorize any scalar phis that
197// feed into these reductions:
198//
199// loop:
200// %phi = phi <float> [ ..., %entry ], [ %acc, %loop ]
201// %acc = call float @llvm.vector.reduce.fadd.nxv2f32(float %phi,
202// <vscale x 2 x float> %vec)
203//
204// ->
205//
206// loop:
207// %phi = phi <vscale x 2 x float> [ ..., %entry ], [ %acc.vec, %loop ]
208// %phi.scalar = extractelement <vscale x 2 x float> %phi, i64 0
209// %acc = call float @llvm.vector.reduce.fadd.nxv2f32(float %x,
210// <vscale x 2 x float> %vec)
211// %acc.vec = insertelement <vscale x 2 x float> poison, float %acc.next, i64 0
212//
213// Which eliminates the scalar -> vector -> scalar crossing during instruction
214// selection.
215bool RISCVCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
216 if (expandVPStrideLoad(I))
217 return true;
218
219 if (widenVPMerge(I))
220 return true;
221
222 if (I.getIntrinsicID() != Intrinsic::vector_reduce_fadd &&
224 return false;
225
226 auto *PHI = dyn_cast<PHINode>(I.getOperand(0));
227 if (!PHI || !PHI->hasOneUse() ||
228 !llvm::is_contained(PHI->incoming_values(), &I))
229 return false;
230
231 Type *VecTy = I.getOperand(1)->getType();
232 IRBuilder<> Builder(PHI);
233 auto *VecPHI = Builder.CreatePHI(VecTy, PHI->getNumIncomingValues());
234
235 for (auto *BB : PHI->blocks()) {
236 Builder.SetInsertPoint(BB->getTerminator());
237 Value *InsertElt = Builder.CreateInsertElement(
238 VecTy, PHI->getIncomingValueForBlock(BB), (uint64_t)0);
239 VecPHI->addIncoming(InsertElt, BB);
240 }
241
242 Builder.SetInsertPoint(&I);
243 I.setOperand(0, Builder.CreateExtractElement(VecPHI, (uint64_t)0));
244
245 PHI->eraseFromParent();
246
247 return true;
248}
249
250// Always expand zero strided loads so we match more .vx splat patterns, even if
251// we have +optimized-zero-stride-loads. RISCVDAGToDAGISel::Select will convert
252// it back to a strided load if it's optimized.
253bool RISCVCodeGenPrepare::expandVPStrideLoad(IntrinsicInst &II) {
254 Value *BasePtr, *VL;
255
256 using namespace PatternMatch;
258 m_Value(BasePtr), m_Zero(), m_AllOnes(), m_Value(VL))))
259 return false;
260
261 // If SEW>XLEN then a splat will get lowered as a zero strided load anyway, so
262 // avoid expanding here.
263 if (II.getType()->getScalarSizeInBits() > ST->getXLen())
264 return false;
265
266 if (!isKnownNonZero(VL, {*DL, DT, nullptr, &II}))
267 return false;
268
269 auto *VTy = cast<VectorType>(II.getType());
270
271 IRBuilder<> Builder(&II);
272 Type *STy = VTy->getElementType();
273 Value *Val = Builder.CreateLoad(STy, BasePtr);
274 Value *Res = Builder.CreateIntrinsic(Intrinsic::experimental_vp_splat, {VTy},
275 {Val, II.getOperand(2), VL});
276
277 II.replaceAllUsesWith(Res);
278 II.eraseFromParent();
279 return true;
280}
281
282bool RISCVCodeGenPrepare::run() {
283 bool MadeChange = false;
284 for (auto &BB : F)
285 for (Instruction &I : llvm::make_early_inc_range(BB))
286 MadeChange |= visit(I);
287
288 return MadeChange;
289}
290
291bool RISCVCodeGenPrepareLegacyPass::runOnFunction(Function &F) {
292 if (skipFunction(F))
293 return false;
294
295 auto &TPC = getAnalysis<TargetPassConfig>();
296 auto &TM = TPC.getTM<RISCVTargetMachine>();
297 auto ST = &TM.getSubtarget<RISCVSubtarget>(F);
298 auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
299
300 RISCVCodeGenPrepare RVCGP(F, DT, ST);
301 return RVCGP.run();
302}
303
304INITIALIZE_PASS_BEGIN(RISCVCodeGenPrepareLegacyPass, DEBUG_TYPE, PASS_NAME,
305 false, false)
307INITIALIZE_PASS_END(RISCVCodeGenPrepareLegacyPass, DEBUG_TYPE, PASS_NAME, false,
308 false)
309
310char RISCVCodeGenPrepareLegacyPass::ID = 0;
311
313 return new RISCVCodeGenPrepareLegacyPass();
314}
315
318 DominatorTree *DT = &FAM.getResult<DominatorTreeAnalysis>(F);
319 auto ST = &TM->getSubtarget<RISCVSubtarget>(F);
320 bool Changed = RISCVCodeGenPrepare(F, DT, ST).run();
321 if (!Changed)
322 return PreservedAnalyses::all();
323
326 return PA;
327}
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool runOnFunction(Function &F, bool PostInlining)
#define DEBUG_TYPE
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
#define PASS_NAME
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
Target-Independent Code Generator Pass Configuration Options pass.
#define PASS_NAME
Value * RHS
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
Base class for instruction visitors.
Definition InstVisitor.h:78
A wrapper class for inspecting calls to intrinsic functions.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM)
unsigned getXLen() const
Target-Independent Code Generator Pass Configuration Options.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
Changed
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:533
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
FunctionPass * createRISCVCodeGenPrepareLegacyPass()
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1897
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.