AMDGPUCodeGenPrepare.cpp
1//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This pass does misc. AMDGPU optimizations on IR before instruction
11/// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPU.h"
16#include "AMDGPUTargetMachine.h"
18#include "llvm/ADT/SetVector.h"
26#include "llvm/IR/Dominators.h"
27#include "llvm/IR/IRBuilder.h"
28#include "llvm/IR/InstVisitor.h"
29#include "llvm/IR/IntrinsicsAMDGPU.h"
31#include "llvm/IR/ValueHandle.h"
33#include "llvm/Pass.h"
38
39#define DEBUG_TYPE "amdgpu-codegenprepare"
40
41using namespace llvm;
42using namespace llvm::PatternMatch;
43
44namespace {
45
47 "amdgpu-codegenprepare-widen-constant-loads",
48 cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
50 cl::init(false));
51
52static cl::opt<bool>
53 BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
54 cl::desc("Break large PHI nodes for DAGISel"),
55 cl::ReallyHidden, cl::init(true));
56
57static cl::opt<bool>
58 ForceBreakLargePHIs("amdgpu-codegenprepare-force-break-large-phis",
59 cl::desc("For testing purposes, always break large "
60 "PHIs even if it isn't profitable."),
61 cl::ReallyHidden, cl::init(false));
62
63static cl::opt<unsigned> BreakLargePHIsThreshold(
64 "amdgpu-codegenprepare-break-large-phis-threshold",
65 cl::desc("Minimum type size in bits for breaking large PHI nodes"),
66 cl::ReallyHidden, cl::init(32));
67
68static cl::opt<bool> UseMul24Intrin(
69 "amdgpu-codegenprepare-mul24",
70 cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
72 cl::init(true));
73
74// Legalize 64-bit division by using the generic IR expansion.
75static cl::opt<bool> ExpandDiv64InIR(
76 "amdgpu-codegenprepare-expand-div64",
77 cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
79 cl::init(false));
80
81// Leave all division operations as they are. This supersedes ExpandDiv64InIR
82// and is used for testing the legalizer.
83static cl::opt<bool> DisableIDivExpand(
84 "amdgpu-codegenprepare-disable-idiv-expansion",
85 cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
87 cl::init(false));
88
89// Disable processing of fdiv so we can better test the backend implementations.
90static cl::opt<bool> DisableFDivExpand(
91 "amdgpu-codegenprepare-disable-fdiv-expansion",
92 cl::desc("Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
94 cl::init(false));
95
96class AMDGPUCodeGenPrepareImpl
97 : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
98public:
99 Function &F;
100 const GCNSubtarget &ST;
101 const AMDGPUTargetMachine &TM;
102 const TargetLibraryInfo *TLI;
103 AssumptionCache *AC;
104 const DominatorTree *DT;
105 const UniformityInfo &UA;
106 const DataLayout &DL;
107 const bool HasFP32DenormalFlush;
108 bool FlowChanged = false;
109 mutable Function *SqrtF32 = nullptr;
110 mutable Function *LdexpF32 = nullptr;
111 mutable SmallVector<WeakVH> DeadVals;
112
113 DenseMap<const PHINode *, bool> BreakPhiNodesCache;
114
115 AMDGPUCodeGenPrepareImpl(Function &F, const AMDGPUTargetMachine &TM,
116 const TargetLibraryInfo *TLI, AssumptionCache *AC,
117 const DominatorTree *DT, const UniformityInfo &UA)
118 : F(F), ST(TM.getSubtarget<GCNSubtarget>(F)), TM(TM), TLI(TLI), AC(AC),
119 DT(DT), UA(UA), DL(F.getDataLayout()),
120 HasFP32DenormalFlush(SIModeRegisterDefaults(F, ST).FP32Denormals ==
121 DenormalMode::getPreserveSign()) {}
122
123 Function *getSqrtF32() const {
124 if (SqrtF32)
125 return SqrtF32;
126
127 LLVMContext &Ctx = F.getContext();
128 SqrtF32 = Intrinsic::getOrInsertDeclaration(
129 F.getParent(), Intrinsic::amdgcn_sqrt, {Type::getFloatTy(Ctx)});
130 return SqrtF32;
131 }
132
133 Function *getLdexpF32() const {
134 if (LdexpF32)
135 return LdexpF32;
136
137 LLVMContext &Ctx = F.getContext();
138 LdexpF32 = Intrinsic::getOrInsertDeclaration(
139 F.getParent(), Intrinsic::ldexp,
140 {Type::getFloatTy(Ctx), Type::getInt32Ty(Ctx)});
141 return LdexpF32;
142 }
143
144 bool canBreakPHINode(const PHINode &I);
145
146 /// \returns True if binary operation \p I is a signed binary operation, false
147 /// otherwise.
148 bool isSigned(const BinaryOperator &I) const;
149
150 /// \returns True if the condition of 'select' operation \p I comes from a
151 /// signed 'icmp' operation, false otherwise.
152 bool isSigned(const SelectInst &I) const;
153
154 /// Return true if \p T is a legal scalar floating point type.
155 bool isLegalFloatingTy(const Type *T) const;
156
157 /// Wrapper to pass all the arguments to computeKnownFPClass
158 KnownFPClass computeKnownFPClass(const Value *V, FPClassTest Interested,
159 const Instruction *CtxI) const {
160 return llvm::computeKnownFPClass(V, DL, Interested, TLI, AC, CtxI, DT);
161 }
162
163 bool canIgnoreDenormalInput(const Value *V, const Instruction *CtxI) const {
164 return HasFP32DenormalFlush ||
165 computeKnownFPClass(V, fcSubnormal, CtxI).isKnownNeverSubnormal();
166 }
167
168 /// \returns The minimum number of bits needed to store the value of \p Op as an
169 /// unsigned integer. Truncating to this size and then zero-extending to
170 /// the original size will not change the value.
171 unsigned numBitsUnsigned(Value *Op) const;
172
173 /// \returns The minimum number of bits needed to store the value of \p Op as a
174 /// signed integer. Truncating to this size and then sign-extending to
175 /// the original size will not change the value.
176 unsigned numBitsSigned(Value *Op) const;
177
178 /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
179 /// SelectionDAG has an issue where an and asserting the bits are known
180 bool replaceMulWithMul24(BinaryOperator &I) const;
181
182 /// Perform the same fold as the equivalently named function in DAGCombiner.
183 /// Since we expand some divisions here, we need to do this before the expansion obscures the select.
184 bool foldBinOpIntoSelect(BinaryOperator &I) const;
185
186 bool divHasSpecialOptimization(BinaryOperator &I,
187 Value *Num, Value *Den) const;
188 unsigned getDivNumBits(BinaryOperator &I, Value *Num, Value *Den,
189 unsigned MaxDivBits, bool Signed) const;
190
191 /// Expands 24 bit div or rem.
192 Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
193 Value *Num, Value *Den,
194 bool IsDiv, bool IsSigned) const;
195
196 Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
197 Value *Num, Value *Den, unsigned NumBits,
198 bool IsDiv, bool IsSigned) const;
199
200 /// Expands 32 bit div or rem.
201 Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
202 Value *Num, Value *Den) const;
203
204 Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
205 Value *Num, Value *Den) const;
206 void expandDivRem64(BinaryOperator &I) const;
207
208 /// Widen a scalar load.
209 ///
210 /// \details Widen a uniform, sub-dword load from constant address space
211 /// memory to a full 32 bits, then truncate the result. This allows a scalar
212 /// load to be selected instead of a vector load.
213 ///
214 /// \returns True.
215
216 bool canWidenScalarExtLoad(LoadInst &I) const;
217
218 Value *matchFractPat(IntrinsicInst &I);
219 Value *applyFractPat(IRBuilder<> &Builder, Value *FractArg);
220
221 bool canOptimizeWithRsq(const FPMathOperator *SqrtOp, FastMathFlags DivFMF,
222 FastMathFlags SqrtFMF) const;
223
224 Value *optimizeWithRsq(IRBuilder<> &Builder, Value *Num, Value *Den,
225 FastMathFlags DivFMF, FastMathFlags SqrtFMF,
226 const Instruction *CtxI) const;
227
228 Value *optimizeWithRcp(IRBuilder<> &Builder, Value *Num, Value *Den,
229 FastMathFlags FMF, const Instruction *CtxI) const;
230 Value *optimizeWithFDivFast(IRBuilder<> &Builder, Value *Num, Value *Den,
231 float ReqdAccuracy) const;
232
233 Value *visitFDivElement(IRBuilder<> &Builder, Value *Num, Value *Den,
234 FastMathFlags DivFMF, FastMathFlags SqrtFMF,
235 Value *RsqOp, const Instruction *FDiv,
236 float ReqdAccuracy) const;
237
238 std::pair<Value *, Value *> getFrexpResults(IRBuilder<> &Builder,
239 Value *Src) const;
240
241 Value *emitRcpIEEE1ULP(IRBuilder<> &Builder, Value *Src,
242 bool IsNegative) const;
243 Value *emitFrexpDiv(IRBuilder<> &Builder, Value *LHS, Value *RHS,
244 FastMathFlags FMF) const;
245 Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src,
246 FastMathFlags FMF) const;
247
248 bool tryNarrowMathIfNoOverflow(Instruction *I);
249
250public:
251 bool visitFDiv(BinaryOperator &I);
252
253 bool visitInstruction(Instruction &I) { return false; }
254 bool visitBinaryOperator(BinaryOperator &I);
255 bool visitLoadInst(LoadInst &I);
256 bool visitSelectInst(SelectInst &I);
257 bool visitPHINode(PHINode &I);
258 bool visitAddrSpaceCastInst(AddrSpaceCastInst &I);
259
260 bool visitIntrinsicInst(IntrinsicInst &I);
261 bool visitFMinLike(IntrinsicInst &I);
262 bool visitSqrt(IntrinsicInst &I);
263 bool run();
264};
265
266class AMDGPUCodeGenPrepare : public FunctionPass {
267public:
268 static char ID;
269 AMDGPUCodeGenPrepare() : FunctionPass(ID) {}
270 void getAnalysisUsage(AnalysisUsage &AU) const override {
271 AU.addRequired<AssumptionCacheTracker>();
272 AU.addRequired<UniformityInfoWrapperPass>();
273 AU.addRequired<TargetLibraryInfoWrapperPass>();
274
275 // FIXME: Division expansion needs to preserve the dominator tree.
276 if (!ExpandDiv64InIR)
277 AU.setPreservesAll();
278 }
279 bool runOnFunction(Function &F) override;
280 StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
281};
282
283} // end anonymous namespace
284
285bool AMDGPUCodeGenPrepareImpl::run() {
286 BreakPhiNodesCache.clear();
287 bool MadeChange = false;
288
289 // Need to use make_early_inc_range because integer division expansion is
290 // handled by Transforms/Utils, and it can delete instructions such as the
291 // terminator of the BB.
292 for (BasicBlock &BB : reverse(F)) {
293 for (Instruction &I : make_early_inc_range(reverse(BB))) {
294 if (!isInstructionTriviallyDead(&I, TLI))
295 MadeChange |= visit(I);
296 }
297 }
298
299 while (!DeadVals.empty()) {
300 if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
301 RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
302 }
303
304 return MadeChange;
305}
306
307bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const {
308 return I.getOpcode() == Instruction::AShr ||
309 I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
310}
311
312bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const {
313 return isa<ICmpInst>(I.getOperand(0)) &&
314 cast<ICmpInst>(I.getOperand(0))->isSigned();
315}
316
317bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const {
318 return Ty->isFloatTy() || Ty->isDoubleTy() ||
319 (Ty->isHalfTy() && ST.has16BitInsts());
320}
321
322bool AMDGPUCodeGenPrepareImpl::canWidenScalarExtLoad(LoadInst &I) const {
323 Type *Ty = I.getType();
324 int TySize = DL.getTypeSizeInBits(Ty);
325 Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);
326
327 return I.isSimple() && TySize < 32 && Alignment >= 4 && UA.isUniform(&I);
328}
329
330unsigned AMDGPUCodeGenPrepareImpl::numBitsUnsigned(Value *Op) const {
331 return computeKnownBits(Op, DL, AC).countMaxActiveBits();
332}
333
334unsigned AMDGPUCodeGenPrepareImpl::numBitsSigned(Value *Op) const {
335 return ComputeMaxSignificantBits(Op, DL, AC);
336}
337
338static void extractValues(IRBuilder<> &Builder,
339 SmallVectorImpl<Value *> &Values, Value *V) {
340 auto *VT = dyn_cast<FixedVectorType>(V->getType());
341 if (!VT) {
342 Values.push_back(V);
343 return;
344 }
345
346 for (int I = 0, E = VT->getNumElements(); I != E; ++I)
347 Values.push_back(Builder.CreateExtractElement(V, I));
348}
349
349
350static Value *insertValues(IRBuilder<> &Builder,
351 Type *Ty,
352 SmallVectorImpl<Value *> &Values) {
353 if (!Ty->isVectorTy()) {
354 assert(Values.size() == 1);
355 return Values[0];
356 }
357
358 Value *NewVal = PoisonValue::get(Ty);
359 for (int I = 0, E = Values.size(); I != E; ++I)
360 NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);
361
362 return NewVal;
363}
364
365bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
366 if (I.getOpcode() != Instruction::Mul)
367 return false;
368
369 Type *Ty = I.getType();
370 unsigned Size = Ty->getScalarSizeInBits();
371 if (Size <= 16 && ST.has16BitInsts())
372 return false;
373
374 // Prefer scalar if this could be s_mul_i32
375 if (UA.isUniform(&I))
376 return false;
377
378 Value *LHS = I.getOperand(0);
379 Value *RHS = I.getOperand(1);
380 IRBuilder<> Builder(&I);
381 Builder.SetCurrentDebugLocation(I.getDebugLoc());
382
383 unsigned LHSBits = 0, RHSBits = 0;
384 bool IsSigned = false;
385
386 if (ST.hasMulU24() && (LHSBits = numBitsUnsigned(LHS)) <= 24 &&
387 (RHSBits = numBitsUnsigned(RHS)) <= 24) {
388 IsSigned = false;
389
390 } else if (ST.hasMulI24() && (LHSBits = numBitsSigned(LHS)) <= 24 &&
391 (RHSBits = numBitsSigned(RHS)) <= 24) {
392 IsSigned = true;
393
394 } else
395 return false;
396
399 SmallVector<Value *, 4> ResultVals;
400 extractValues(Builder, LHSVals, LHS);
401 extractValues(Builder, RHSVals, RHS);
402
403 IntegerType *I32Ty = Builder.getInt32Ty();
404 IntegerType *IntrinTy = Size > 32 ? Builder.getInt64Ty() : I32Ty;
405 Type *DstTy = LHSVals[0]->getType();
406
407 for (int I = 0, E = LHSVals.size(); I != E; ++I) {
408 Value *LHS = IsSigned ? Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty)
409 : Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
410 Value *RHS = IsSigned ? Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty)
411 : Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
412 Intrinsic::ID ID =
413 IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
414 Value *Result = Builder.CreateIntrinsic(ID, {IntrinTy}, {LHS, RHS});
415 Result = IsSigned ? Builder.CreateSExtOrTrunc(Result, DstTy)
416 : Builder.CreateZExtOrTrunc(Result, DstTy);
417 ResultVals.push_back(Result);
418 }
419
420 Value *NewVal = insertValues(Builder, Ty, ResultVals);
421 NewVal->takeName(&I);
422 I.replaceAllUsesWith(NewVal);
423 DeadVals.push_back(&I);
424
425 return true;
426}
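//
// For illustration, assuming both operands of a divergent 32-bit multiply are
// known to fit in 24 bits, the rewrite above is roughly (value names are
// placeholders):
//
//   %mul = mul i32 %x, %y
//     -->
//   %mul = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
//
// For the signed case the operands are sign-extended/truncated instead and
// llvm.amdgcn.mul.i24 is used.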
427
428// Find a select instruction, which may have been cast. This is mostly to deal
429// with cases where i16 selects were promoted here to i32.
430static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
431 Cast = nullptr;
432 if (SelectInst *Sel = dyn_cast<SelectInst>(V))
433 return Sel;
434
435 if ((Cast = dyn_cast<CastInst>(V))) {
436 if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
437 return Sel;
438 }
439
440 return nullptr;
441}
442
443bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
444 // Don't do this unless the old select is going away. We want to eliminate the
445 // binary operator, not replace a binop with a select.
446 int SelOpNo = 0;
447
448 CastInst *CastOp;
449
450 // TODO: Should probably try to handle some cases with multiple
451 // users. Duplicating the select may be profitable for division.
452 SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
453 if (!Sel || !Sel->hasOneUse()) {
454 SelOpNo = 1;
455 Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
456 }
457
458 if (!Sel || !Sel->hasOneUse())
459 return false;
460
461 Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
462 Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
463 Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
464 if (!CBO || !CT || !CF)
465 return false;
466
467 if (CastOp) {
468 if (!CastOp->hasOneUse())
469 return false;
470 CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), DL);
471 CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), DL);
472 }
473
474 // TODO: Handle special 0/-1 cases DAG combine does, although we only really
475 // need to handle divisions here.
476 Constant *FoldedT =
477 SelOpNo ? ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, DL)
478 : ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, DL);
479 if (!FoldedT || isa<ConstantExpr>(FoldedT))
480 return false;
481
482 Constant *FoldedF =
483 SelOpNo ? ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, DL)
484 : ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, DL);
485 if (!FoldedF || isa<ConstantExpr>(FoldedF))
486 return false;
487
488 IRBuilder<> Builder(&BO);
489 Builder.SetCurrentDebugLocation(BO.getDebugLoc());
490 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
491 Builder.setFastMathFlags(FPOp->getFastMathFlags());
492
493 Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
494 FoldedT, FoldedF);
495 NewSelect->takeName(&BO);
496 BO.replaceAllUsesWith(NewSelect);
497 DeadVals.push_back(&BO);
498 if (CastOp)
499 DeadVals.push_back(CastOp);
500 DeadVals.push_back(Sel);
501 return true;
502}
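//
// A sketch of the fold above, assuming the select feeding the division has
// constant arms and a single use (constants chosen only for illustration):
//
//   %sel = select i1 %c, i32 8, i32 4
//   %div = udiv i32 100, %sel
//     -->
//   %div = select i1 %c, i32 12, i32 25
//
// The binary operator is folded into each arm, so the division disappears.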
503
504std::pair<Value *, Value *>
505AMDGPUCodeGenPrepareImpl::getFrexpResults(IRBuilder<> &Builder,
506 Value *Src) const {
507 Type *Ty = Src->getType();
508 Value *Frexp = Builder.CreateIntrinsic(Intrinsic::frexp,
509 {Ty, Builder.getInt32Ty()}, Src);
510 Value *FrexpMant = Builder.CreateExtractValue(Frexp, {0});
511
512 // Bypass the bug workaround for the exponent result since it doesn't matter.
513 // TODO: Does the bug workaround even really need to consider the exponent
514 // result? It's unspecified by the spec.
515
516 Value *FrexpExp =
517 ST.hasFractBug()
518 ? Builder.CreateIntrinsic(Intrinsic::amdgcn_frexp_exp,
519 {Builder.getInt32Ty(), Ty}, Src)
520 : Builder.CreateExtractValue(Frexp, {1});
521 return {FrexpMant, FrexpExp};
522}
523
524/// Emit an expansion of 1.0 / Src good for 1ulp that supports denormals.
525Value *AMDGPUCodeGenPrepareImpl::emitRcpIEEE1ULP(IRBuilder<> &Builder,
526 Value *Src,
527 bool IsNegative) const {
528 // Same as for 1.0, but expand the sign out of the constant.
529 // -1.0 / x -> rcp (fneg x)
530 if (IsNegative)
531 Src = Builder.CreateFNeg(Src);
532
533 // The rcp instruction doesn't support denormals, so scale the input
534 // out of the denormal range and convert at the end.
535 //
536 // Expand as 2^-n * (1.0 / (x * 2^n))
537
538 // TODO: Skip scaling if input is known never denormal and the input
539 // range won't underflow to denormal. The hard part is knowing the
540 // result. We need a range check, the result could be denormal for
541 // 0x1p+126 < den <= 0x1p+127.
542 auto [FrexpMant, FrexpExp] = getFrexpResults(Builder, Src);
543 Value *ScaleFactor = Builder.CreateNeg(FrexpExp);
544 Value *Rcp = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMant);
545 return Builder.CreateCall(getLdexpF32(), {Rcp, ScaleFactor});
546}
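//
// The scaling above relies on the identity (informal sketch, ignoring
// rounding): with m = frexp_mant(x) and e = frexp_exp(x), x = m * 2^e, so
//
//   1.0 / x = (1.0 / m) * 2^-e = ldexp(rcp(m), -e)
//
// Since |m| is in [0.5, 1.0), rcp never sees a denormal input.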
547
548/// Emit a 2ulp expansion for fdiv by using frexp for input scaling.
549Value *AMDGPUCodeGenPrepareImpl::emitFrexpDiv(IRBuilder<> &Builder, Value *LHS,
550 Value *RHS,
551 FastMathFlags FMF) const {
552 // If we have to work around the fract/frexp bug, we're worse off than
553 // using the fdiv.fast expansion. The full safe expansion is faster if we have
554 // fast FMA.
555 if (HasFP32DenormalFlush && ST.hasFractBug() && !ST.hasFastFMAF32() &&
556 (!FMF.noNaNs() || !FMF.noInfs()))
557 return nullptr;
558
559 // We're scaling the LHS to avoid a denormal input, and scale the denominator
560 // to avoid large values underflowing the result.
561 auto [FrexpMantRHS, FrexpExpRHS] = getFrexpResults(Builder, RHS);
562
563 Value *Rcp =
564 Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMantRHS);
565
566 auto [FrexpMantLHS, FrexpExpLHS] = getFrexpResults(Builder, LHS);
567 Value *Mul = Builder.CreateFMul(FrexpMantLHS, Rcp);
568
569 // We multiplied by 2^N/2^M, so we need to multiply by 2^(N-M) to scale the
570 // result.
571 Value *ExpDiff = Builder.CreateSub(FrexpExpLHS, FrexpExpRHS);
572 return Builder.CreateCall(getLdexpF32(), {Mul, ExpDiff});
573}
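//
// Sketch of the math used above: with a = ma * 2^ea and b = mb * 2^eb,
//
//   a / b = (ma / mb) * 2^(ea - eb) = ldexp(ma * rcp(mb), ea - eb)
//
// so both the rcp and the multiply only ever see mantissas in [0.5, 1.0).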
574
575/// Emit a sqrt that handles denormals and is accurate to 2ulp.
576Value *AMDGPUCodeGenPrepareImpl::emitSqrtIEEE2ULP(IRBuilder<> &Builder,
577 Value *Src,
578 FastMathFlags FMF) const {
579 Type *Ty = Src->getType();
580 APFloat SmallestNormal =
581 APFloat::getSmallestNormalized(Ty->getFltSemantics());
582 Value *NeedScale =
583 Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));
584
585 ConstantInt *Zero = Builder.getInt32(0);
586 Value *InputScaleFactor =
587 Builder.CreateSelect(NeedScale, Builder.getInt32(32), Zero);
588
589 Value *Scaled = Builder.CreateCall(getLdexpF32(), {Src, InputScaleFactor});
590
591 Value *Sqrt = Builder.CreateCall(getSqrtF32(), Scaled);
592
593 Value *OutputScaleFactor =
594 Builder.CreateSelect(NeedScale, Builder.getInt32(-16), Zero);
595 return Builder.CreateCall(getLdexpF32(), {Sqrt, OutputScaleFactor});
596}
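//
// Why the 32 / -16 pair above works (informal): if the input is scaled by
// 2^32, then
//
//   sqrt(x * 2^32) = sqrt(x) * 2^16,  so  sqrt(x) = ldexp(sqrt(x * 2^32), -16)
//
// which keeps the argument of the hardware sqrt out of the denormal range.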
597
598/// Emit an expansion of 1.0 / sqrt(Src) good for 1ulp that supports denormals.
599static Value *emitRsqIEEE1ULP(IRBuilder<> &Builder, Value *Src,
600 bool IsNegative) {
601 // bool need_scale = x < 0x1p-126f;
602 // float input_scale = need_scale ? 0x1.0p+24f : 1.0f;
603 // float output_scale = need_scale ? 0x1.0p+12f : 1.0f;
604 // rsq(x * input_scale) * output_scale;
605
606 Type *Ty = Src->getType();
607 APFloat SmallestNormal =
608 APFloat::getSmallestNormalized(Ty->getFltSemantics());
609 Value *NeedScale =
610 Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));
611 Constant *One = ConstantFP::get(Ty, 1.0);
612 Constant *InputScale = ConstantFP::get(Ty, 0x1.0p+24);
613 Constant *OutputScale =
614 ConstantFP::get(Ty, IsNegative ? -0x1.0p+12 : 0x1.0p+12);
615
616 Value *InputScaleFactor = Builder.CreateSelect(NeedScale, InputScale, One);
617
618 Value *ScaledInput = Builder.CreateFMul(Src, InputScaleFactor);
619 Value *Rsq = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, ScaledInput);
620 Value *OutputScaleFactor = Builder.CreateSelect(
621 NeedScale, OutputScale, IsNegative ? ConstantFP::get(Ty, -1.0) : One);
622
623 return Builder.CreateFMul(Rsq, OutputScaleFactor);
624}
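//
// The matching identity for the pseudocode above (informal):
//
//   rsq(x * 2^24) = 1 / sqrt(x * 2^24) = rsq(x) * 2^-12
//
// hence the result is multiplied back by 2^12 (negated for -1.0 / sqrt(x)).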
625
626bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(const FPMathOperator *SqrtOp,
627 FastMathFlags DivFMF,
628 FastMathFlags SqrtFMF) const {
629 // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
630 if (!DivFMF.allowContract() || !SqrtFMF.allowContract())
631 return false;
632
633 // v_rsq_f32 gives 1ulp
634 return SqrtFMF.approxFunc() || SqrtOp->getFPAccuracy() >= 1.0f;
635}
636
637Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
638 IRBuilder<> &Builder, Value *Num, Value *Den, const FastMathFlags DivFMF,
639 const FastMathFlags SqrtFMF, const Instruction *CtxI) const {
640 // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
641 assert(DivFMF.allowContract() && SqrtFMF.allowContract());
642
643 // rsq_f16 is accurate to 0.51 ulp.
644 // rsq_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
645 // rsq_f64 is never accurate.
646 const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num);
647 if (!CLHS)
648 return nullptr;
649
650 assert(Den->getType()->isFloatTy());
651
652 bool IsNegative = false;
653
654 // TODO: Handle other numerator values with arcp.
655 if (CLHS->isExactlyValue(1.0) || (IsNegative = CLHS->isExactlyValue(-1.0))) {
656 // Add in the sqrt flags.
657 IRBuilder<>::FastMathFlagGuard Guard(Builder);
658 Builder.setFastMathFlags(DivFMF | SqrtFMF);
659
660 if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) ||
661 canIgnoreDenormalInput(Den, CtxI)) {
662 Value *Result = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
663 // -1.0 / sqrt(x) -> fneg(rsq(x))
664 return IsNegative ? Builder.CreateFNeg(Result) : Result;
665 }
666
667 return emitRsqIEEE1ULP(Builder, Den, IsNegative);
668 }
669
670 return nullptr;
671}
672
673// Optimize fdiv with rcp:
674//
675// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
676// allowed with afn.
677//
678// a/b -> a*rcp(b) when arcp is allowed, and we only need provide ULP 1.0
679Value *
680AMDGPUCodeGenPrepareImpl::optimizeWithRcp(IRBuilder<> &Builder, Value *Num,
681 Value *Den, FastMathFlags FMF,
682 const Instruction *CtxI) const {
683 // rcp_f16 is accurate to 0.51 ulp.
684 // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
685 // rcp_f64 is never accurate.
686 assert(Den->getType()->isFloatTy());
687
688 if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
689 bool IsNegative = false;
690 if (CLHS->isExactlyValue(1.0) ||
691 (IsNegative = CLHS->isExactlyValue(-1.0))) {
692 Value *Src = Den;
693
694 if (HasFP32DenormalFlush || FMF.approxFunc()) {
695 // -1.0 / x -> 1.0 / fneg(x)
696 if (IsNegative)
697 Src = Builder.CreateFNeg(Src);
698
699 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
700 // the CI documentation they have a worst case error of 1 ulp.
701 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK
702 // to use it as long as we aren't trying to use denormals.
703 //
704 // v_rcp_f16 and v_rsq_f16 DO support denormals.
705
706 // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
707 // insert rsq intrinsic here.
708
709 // 1.0 / x -> rcp(x)
710 return Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Src);
711 }
712
713 // TODO: If the input isn't denormal, and we know the input exponent isn't
714 // big enough to introduce a denormal we can avoid the scaling.
715 return emitRcpIEEE1ULP(Builder, Src, IsNegative);
716 }
717 }
718
719 if (FMF.allowReciprocal()) {
720 // x / y -> x * (1.0 / y)
721
722 // TODO: Could avoid denormal scaling and use raw rcp if we knew the output
723 // will never underflow.
724 if (HasFP32DenormalFlush || FMF.approxFunc()) {
725 Value *Recip = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Den);
726 return Builder.CreateFMul(Num, Recip);
727 }
728
729 Value *Recip = emitRcpIEEE1ULP(Builder, Den, false);
730 return Builder.CreateFMul(Num, Recip);
731 }
732
733 return nullptr;
734}
735
736// optimize with fdiv.fast:
737//
738// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
739//
740// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
741//
742// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
743Value *AMDGPUCodeGenPrepareImpl::optimizeWithFDivFast(
744 IRBuilder<> &Builder, Value *Num, Value *Den, float ReqdAccuracy) const {
745 // fdiv.fast can achieve 2.5 ULP accuracy.
746 if (ReqdAccuracy < 2.5f)
747 return nullptr;
748
749 // Only have fdiv.fast for f32.
750 assert(Den->getType()->isFloatTy());
751
752 bool NumIsOne = false;
753 if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
754 if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
755 NumIsOne = true;
756 }
757
758 // fdiv.fast does not support denormals, but it is always fine to use for 1.0/x.
759 //
760 // TODO: This works for any value with a specific known exponent range, don't
761 // just limit to constant 1.
762 if (!HasFP32DenormalFlush && !NumIsOne)
763 return nullptr;
764
765 return Builder.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {Num, Den});
766}
767
768Value *AMDGPUCodeGenPrepareImpl::visitFDivElement(
769 IRBuilder<> &Builder, Value *Num, Value *Den, FastMathFlags DivFMF,
770 FastMathFlags SqrtFMF, Value *RsqOp, const Instruction *FDivInst,
771 float ReqdDivAccuracy) const {
772 if (RsqOp) {
773 Value *Rsq =
774 optimizeWithRsq(Builder, Num, RsqOp, DivFMF, SqrtFMF, FDivInst);
775 if (Rsq)
776 return Rsq;
777 }
778
779 Value *Rcp = optimizeWithRcp(Builder, Num, Den, DivFMF, FDivInst);
780 if (Rcp)
781 return Rcp;
782
783 // In the basic case fdiv_fast has the same instruction count as the frexp div
784 // expansion. Slightly prefer fdiv_fast since it ends in an fmul that can
785 // potentially be fused into a user. Also, materialization of the constants
786 // can be reused for multiple instances.
787 Value *FDivFast = optimizeWithFDivFast(Builder, Num, Den, ReqdDivAccuracy);
788 if (FDivFast)
789 return FDivFast;
790
791 return emitFrexpDiv(Builder, Num, Den, DivFMF);
792}
793
794// Optimization is performed based on fpmath, fast math flags, and the denormal
795// mode to lower fdiv with either rcp or fdiv.fast.
796//
797// With rcp:
798// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
799// allowed with afn.
800//
801// a/b -> a*rcp(b) when inaccurate rcp is allowed with afn.
802//
803// With fdiv.fast:
804// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
805//
806// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
807//
808// NOTE: rcp is the preference in cases that both are legal.
809bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
810 if (DisableFDivExpand)
811 return false;
812
813 Type *Ty = FDiv.getType()->getScalarType();
814 if (!Ty->isFloatTy())
815 return false;
816
817 // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
818 // expansion around them in codegen. f16 is good enough to always use.
819
820 const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
821 const FastMathFlags DivFMF = FPOp->getFastMathFlags();
822 const float ReqdAccuracy = FPOp->getFPAccuracy();
823
824 FastMathFlags SqrtFMF;
825
826 Value *Num = FDiv.getOperand(0);
827 Value *Den = FDiv.getOperand(1);
828
829 Value *RsqOp = nullptr;
830 auto *DenII = dyn_cast<IntrinsicInst>(Den);
831 if (DenII && DenII->getIntrinsicID() == Intrinsic::sqrt &&
832 DenII->hasOneUse()) {
833 const auto *SqrtOp = cast<FPMathOperator>(DenII);
834 SqrtFMF = SqrtOp->getFastMathFlags();
835 if (canOptimizeWithRsq(SqrtOp, DivFMF, SqrtFMF))
836 RsqOp = SqrtOp->getOperand(0);
837 }
838
839 // Inaccurate rcp is allowed with afn.
840 //
841 // Defer to codegen to handle this.
842 //
843 // TODO: Decide on an interpretation for interactions between afn + arcp +
844 // !fpmath, and make it consistent between here and codegen. For now, defer
845 // expansion of afn to codegen. The current interpretation is so aggressive we
846 // don't need any pre-consideration here when we have better information. A
847 // more conservative interpretation could use handling here.
848 const bool AllowInaccurateRcp = DivFMF.approxFunc();
849 if (!RsqOp && AllowInaccurateRcp)
850 return false;
851
852 // Defer the correct implementations to codegen.
853 if (ReqdAccuracy < 1.0f)
854 return false;
855
856 IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
857 Builder.setFastMathFlags(DivFMF);
858 Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());
859
860 SmallVector<Value *, 4> NumVals;
861 SmallVector<Value *, 4> DenVals;
862 SmallVector<Value *, 4> RsqDenVals;
863 extractValues(Builder, NumVals, Num);
864 extractValues(Builder, DenVals, Den);
865
866 if (RsqOp)
867 extractValues(Builder, RsqDenVals, RsqOp);
868
869 SmallVector<Value *, 4> ResultVals(NumVals.size());
870 for (int I = 0, E = NumVals.size(); I != E; ++I) {
871 Value *NumElt = NumVals[I];
872 Value *DenElt = DenVals[I];
873 Value *RsqDenElt = RsqOp ? RsqDenVals[I] : nullptr;
874
875 Value *NewElt =
876 visitFDivElement(Builder, NumElt, DenElt, DivFMF, SqrtFMF, RsqDenElt,
877 cast<Instruction>(FPOp), ReqdAccuracy);
878 if (!NewElt) {
879 // Keep the original, but scalarized.
880
881 // This has the unfortunate side effect of sometimes scalarizing when
882 // we're not going to do anything.
883 NewElt = Builder.CreateFDiv(NumElt, DenElt);
884 if (auto *NewEltInst = dyn_cast<Instruction>(NewElt))
885 NewEltInst->copyMetadata(FDiv);
886 }
887
888 ResultVals[I] = NewElt;
889 }
890
891 Value *NewVal = insertValues(Builder, FDiv.getType(), ResultVals);
892
893 if (NewVal) {
894 FDiv.replaceAllUsesWith(NewVal);
895 NewVal->takeName(&FDiv);
896 DeadVals.push_back(&FDiv);
897 }
898
899 return true;
900}
901
902static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
903 Value *LHS, Value *RHS) {
904 Type *I32Ty = Builder.getInt32Ty();
905 Type *I64Ty = Builder.getInt64Ty();
906
907 Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
908 Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
909 Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
910 Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
911 Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
912 Hi = Builder.CreateTrunc(Hi, I32Ty);
913 return std::pair(Lo, Hi);
914}
915
916static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
917 return getMul64(Builder, LHS, RHS).second;
918}
919
920/// Figure out how many bits are really needed for this division.
921/// \p MaxDivBits is an optimization hint to bypass the second
922/// ComputeNumSignBits/computeKnownBits call if the first one is
923/// insufficient.
924unsigned AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
925 Value *Den,
926 unsigned MaxDivBits,
927 bool IsSigned) const {
928 assert(Num->getType()->getScalarSizeInBits() ==
929 Den->getType()->getScalarSizeInBits());
930 unsigned SSBits = Num->getType()->getScalarSizeInBits();
931 if (IsSigned) {
932 unsigned RHSSignBits = ComputeNumSignBits(Den, DL, AC, &I);
933 // A sign bit needs to be reserved for shrinking.
934 unsigned DivBits = SSBits - RHSSignBits + 1;
935 if (DivBits > MaxDivBits)
936 return SSBits;
937
938 unsigned LHSSignBits = ComputeNumSignBits(Num, DL, AC, &I);
939
940 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
941 DivBits = SSBits - SignBits + 1;
942 return DivBits;
943 }
944
945 // All bits are used for unsigned division for Num or Den in range
946 // (SignedMax, UnsignedMax].
947 KnownBits Known = computeKnownBits(Den, DL, AC, &I);
948 if (Known.isNegative() || !Known.isNonNegative())
949 return SSBits;
950 unsigned RHSSignBits = Known.countMinLeadingZeros();
951 unsigned DivBits = SSBits - RHSSignBits;
952 if (DivBits > MaxDivBits)
953 return SSBits;
954
955 Known = computeKnownBits(Num, DL, AC, &I);
956 if (Known.isNegative() || !Known.isNonNegative())
957 return SSBits;
958 unsigned LHSSignBits = Known.countMinLeadingZeros();
959
960 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
961 DivBits = SSBits - SignBits;
962 return DivBits;
963}
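//
// A worked example of the logic above, assuming a 64-bit unsigned division
// where known bits prove the top 40 bits of both operands are clear: each side
// has at least 40 leading zeros, so DivBits = 64 - 40 = 24 and the division
// qualifies for the 24-bit expansion.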
964
965// The fractional part of a float is enough to accurately represent up to
966// a 24-bit signed integer.
967Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
968 BinaryOperator &I, Value *Num,
969 Value *Den, bool IsDiv,
970 bool IsSigned) const {
971 unsigned DivBits = getDivNumBits(I, Num, Den, 24, IsSigned);
972 if (DivBits > 24)
973 return nullptr;
974 return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
975}
976
977Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
978 IRBuilder<> &Builder, BinaryOperator &I, Value *Num, Value *Den,
979 unsigned DivBits, bool IsDiv, bool IsSigned) const {
980 Type *I32Ty = Builder.getInt32Ty();
981 Num = Builder.CreateTrunc(Num, I32Ty);
982 Den = Builder.CreateTrunc(Den, I32Ty);
983
984 Type *F32Ty = Builder.getFloatTy();
985 ConstantInt *One = Builder.getInt32(1);
986 Value *JQ = One;
987
988 if (IsSigned) {
989 // char|short jq = ia ^ ib;
990 JQ = Builder.CreateXor(Num, Den);
991
992 // jq = jq >> (bitsize - 2)
993 JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));
994
995 // jq = jq | 0x1
996 JQ = Builder.CreateOr(JQ, One);
997 }
998
999 // int ia = (int)LHS;
1000 Value *IA = Num;
1001
1002 // int ib = (int)RHS;
1003 Value *IB = Den;
1004
1005 // float fa = (float)ia;
1006 Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
1007 : Builder.CreateUIToFP(IA, F32Ty);
1008
1009 // float fb = (float)ib;
1010 Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
1011 : Builder.CreateUIToFP(IB,F32Ty);
1012
1013 Value *RCP = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp,
1014 Builder.getFloatTy(), {FB});
1015 Value *FQM = Builder.CreateFMul(FA, RCP);
1016
1017 // fq = trunc(fqm);
1018 CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
1019 FQ->copyFastMathFlags(Builder.getFastMathFlags());
1020
1021 // float fqneg = -fq;
1022 Value *FQNeg = Builder.CreateFNeg(FQ);
1023
1024 // float fr = mad(fqneg, fb, fa);
1025 auto FMAD = !ST.hasMadMacF32Insts()
1026 ? Intrinsic::fma
1027 : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
1028 Value *FR = Builder.CreateIntrinsic(FMAD,
1029 {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
1030
1031 // int iq = (int)fq;
1032 Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
1033 : Builder.CreateFPToUI(FQ, I32Ty);
1034
1035 // fr = fabs(fr);
1036 FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);
1037
1038 // fb = fabs(fb);
1039 FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);
1040
1041 // int cv = fr >= fb;
1042 Value *CV = Builder.CreateFCmpOGE(FR, FB);
1043
1044 // jq = (cv ? jq : 0);
1045 JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));
1046
1047 // dst = iq + jq;
1048 Value *Div = Builder.CreateAdd(IQ, JQ);
1049
1050 Value *Res = Div;
1051 if (!IsDiv) {
1052 // Rem needs compensation; it's easier to recompute it.
1053 Value *Rem = Builder.CreateMul(Div, Den);
1054 Res = Builder.CreateSub(Num, Rem);
1055 }
1056
1057 if (DivBits != 0 && DivBits < 32) {
1058 // Extend in register from the number of bits this divide really is.
1059 if (IsSigned) {
1060 int InRegBits = 32 - DivBits;
1061
1062 Res = Builder.CreateShl(Res, InRegBits);
1063 Res = Builder.CreateAShr(Res, InRegBits);
1064 } else {
1065 ConstantInt *TruncMask
1066 = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
1067 Res = Builder.CreateAnd(Res, TruncMask);
1068 }
1069 }
1070
1071 return Res;
1072}
1073
1074// Try to recognize special cases for which the DAG will emit special, better
1075// expansions than the general expansion we do here.
1076
1077// TODO: It would be better to just directly handle those optimizations here.
1078bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
1079 Value *Num,
1080 Value *Den) const {
1081 if (Constant *C = dyn_cast<Constant>(Den)) {
1082 // Arbitrary constants get a better expansion as long as a wider mulhi is
1083 // legal.
1084 if (C->getType()->getScalarSizeInBits() <= 32)
1085 return true;
1086
1087 // TODO: Sdiv check for not exact for some reason.
1088
1089 // If there's no wider mulhi, there's only a better expansion for powers of
1090 // two.
1091 // TODO: Should really know for each vector element.
1092 if (isKnownToBeAPowerOfTwo(C, DL, true, AC, &I, DT))
1093 return true;
1094
1095 return false;
1096 }
1097
1098 if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
1099 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
1100 if (BinOpDen->getOpcode() == Instruction::Shl &&
1101 isa<Constant>(BinOpDen->getOperand(0)) &&
1102 isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), DL, true, AC, &I, DT)) {
1103 return true;
1104 }
1105 }
1106
1107 return false;
1108}
1109
1110static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout DL) {
1111 // Check whether the sign can be determined statically.
1112 KnownBits Known = computeKnownBits(V, DL);
1113 if (Known.isNegative())
1114 return Constant::getAllOnesValue(V->getType());
1115 if (Known.isNonNegative())
1116 return Constant::getNullValue(V->getType());
1117 return Builder.CreateAShr(V, Builder.getInt32(31));
1118}
1119
1120Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
1121 BinaryOperator &I, Value *X,
1122 Value *Y) const {
1123 Instruction::BinaryOps Opc = I.getOpcode();
1124 assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
1125 Opc == Instruction::SRem || Opc == Instruction::SDiv);
1126
1127 FastMathFlags FMF;
1128 FMF.setFast();
1129 Builder.setFastMathFlags(FMF);
1130
1131 if (divHasSpecialOptimization(I, X, Y))
1132 return nullptr; // Keep it for later optimization.
1133
1134 bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
1135 bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
1136
1137 Type *Ty = X->getType();
1138 Type *I32Ty = Builder.getInt32Ty();
1139 Type *F32Ty = Builder.getFloatTy();
1140
1141 if (Ty->getScalarSizeInBits() != 32) {
1142 if (IsSigned) {
1143 X = Builder.CreateSExtOrTrunc(X, I32Ty);
1144 Y = Builder.CreateSExtOrTrunc(Y, I32Ty);
1145 } else {
1146 X = Builder.CreateZExtOrTrunc(X, I32Ty);
1147 Y = Builder.CreateZExtOrTrunc(Y, I32Ty);
1148 }
1149 }
1150
1151 if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
1152 return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
1153 Builder.CreateZExtOrTrunc(Res, Ty);
1154 }
1155
1156 ConstantInt *Zero = Builder.getInt32(0);
1157 ConstantInt *One = Builder.getInt32(1);
1158
1159 Value *Sign = nullptr;
1160 if (IsSigned) {
1161 Value *SignX = getSign32(X, Builder, DL);
1162 Value *SignY = getSign32(Y, Builder, DL);
1163 // Remainder sign is the same as LHS
1164 Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;
1165
1166 X = Builder.CreateAdd(X, SignX);
1167 Y = Builder.CreateAdd(Y, SignY);
1168
1169 X = Builder.CreateXor(X, SignX);
1170 Y = Builder.CreateXor(Y, SignY);
1171 }
1172
1173 // The algorithm here is based on ideas from "Software Integer Division", Tom
1174 // Rodeheffer, August 2008.
1175 //
1176 // unsigned udiv(unsigned x, unsigned y) {
1177 // // Initial estimate of inv(y). The constant is less than 2^32 to ensure
1178 // // that this is a lower bound on inv(y), even if some of the calculations
1179 // // round up.
1180 // unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
1181 //
1182 // // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
1183 // // Empirically this is guaranteed to give a "two-y" lower bound on
1184 // // inv(y).
1185 // z += umulh(z, -y * z);
1186 //
1187 // // Quotient/remainder estimate.
1188 // unsigned q = umulh(x, z);
1189 // unsigned r = x - q * y;
1190 //
1191 // // Two rounds of quotient/remainder refinement.
1192 // if (r >= y) {
1193 // ++q;
1194 // r -= y;
1195 // }
1196 // if (r >= y) {
1197 // ++q;
1198 // r -= y;
1199 // }
1200 //
1201 // return q;
1202 // }
1203
1204 // Initial estimate of inv(y).
1205 Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
1206 Value *RcpY = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp, F32Ty, {FloatY});
1207 Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
1208 Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
1209 Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);
1210
1211 // One round of UNR.
1212 Value *NegY = Builder.CreateSub(Zero, Y);
1213 Value *NegYZ = Builder.CreateMul(NegY, Z);
1214 Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));
1215
1216 // Quotient/remainder estimate.
1217 Value *Q = getMulHu(Builder, X, Z);
1218 Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));
1219
1220 // First quotient/remainder refinement.
1221 Value *Cond = Builder.CreateICmpUGE(R, Y);
1222 if (IsDiv)
1223 Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1224 R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1225
1226 // Second quotient/remainder refinement.
1227 Cond = Builder.CreateICmpUGE(R, Y);
1228 Value *Res;
1229 if (IsDiv)
1230 Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1231 else
1232 Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1233
1234 if (IsSigned) {
1235 Res = Builder.CreateXor(Res, Sign);
1236 Res = Builder.CreateSub(Res, Sign);
1237 Res = Builder.CreateSExtOrTrunc(Res, Ty);
1238 } else {
1239 Res = Builder.CreateZExtOrTrunc(Res, Ty);
1240 }
1241 return Res;
1242}
1243
1244Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
1245 BinaryOperator &I, Value *Num,
1246 Value *Den) const {
1247 if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
1248 return nullptr; // Keep it for later optimization.
1249
1250 Instruction::BinaryOps Opc = I.getOpcode();
1251
1252 bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
1253 bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
1254
1255 unsigned NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
1256 if (NumDivBits > 32)
1257 return nullptr;
1258
1259 Value *Narrowed = nullptr;
1260 if (NumDivBits <= 24) {
1261 Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
1262 IsDiv, IsSigned);
1263 } else if (NumDivBits <= 32) {
1264 Narrowed = expandDivRem32(Builder, I, Num, Den);
1265 }
1266
1267 if (Narrowed) {
1268 return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
1269 Builder.CreateZExt(Narrowed, Num->getType());
1270 }
1271
1272 return nullptr;
1273}
1274
1275void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
1276 Instruction::BinaryOps Opc = I.getOpcode();
1277 // Do the general expansion.
1278 if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
1279 expandDivisionUpTo64Bits(&I);
1280 return;
1281 }
1282
1283 if (Opc == Instruction::URem || Opc == Instruction::SRem) {
1284 expandRemainderUpTo64Bits(&I);
1285 return;
1286 }
1287
1288 llvm_unreachable("not a division");
1289}
1290
1291/*
1292This can cause an inconsistency for non-byte loads, for example:
1293```
1294 %load = load i1, ptr addrspace(4) %arg, align 4
1295 %zext = zext i1 %load to i64
1296 %add = add i64 %zext, 1
1297```
1298Instead of creating `s_and_b32 s0, s0, 1`,
1299it will create `s_and_b32 s0, s0, 0xff`.
1300We accept this change since the non-byte load assumes the upper bits
1301within the byte are all 0.
1302*/
1303bool AMDGPUCodeGenPrepareImpl::tryNarrowMathIfNoOverflow(Instruction *I) {
1304 unsigned Opc = I->getOpcode();
1305 Type *OldType = I->getType();
1306
1307 if (Opc != Instruction::Add && Opc != Instruction::Mul)
1308 return false;
1309
1310 unsigned OrigBit = OldType->getScalarSizeInBits();
1311
1312 if (Opc != Instruction::Add && Opc != Instruction::Mul)
1313 llvm_unreachable("Unexpected opcode, only valid for Instruction::Add and "
1314 "Instruction::Mul.");
1315
1316 unsigned MaxBitsNeeded = computeKnownBits(I, DL).countMaxActiveBits();
1317
1318 MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
1319 Type *NewType = DL.getSmallestLegalIntType(I->getContext(), MaxBitsNeeded);
1320 if (!NewType)
1321 return false;
1322 unsigned NewBit = NewType->getIntegerBitWidth();
1323 if (NewBit >= OrigBit)
1324 return false;
1325 NewType = I->getType()->getWithNewBitWidth(NewBit);
1326
1327 // Old cost
1328 const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F);
1328 InstructionCost OldCost =
1329 TTI.getArithmeticInstrCost(Opc, OldType,
1330 TargetTransformInfo::TCK_RecipThroughput);
1331 // New cost of new op
1332 InstructionCost NewCost =
1333 TTI.getArithmeticInstrCost(Opc, NewType, TargetTransformInfo::TCK_RecipThroughput);
1334 // New cost of narrowing 2 operands (use trunc)
1335 int NumOfNonConstOps = 2;
1336 if (isa<Constant>(I->getOperand(0)) || isa<Constant>(I->getOperand(1))) {
1337 // Cannot be both constant, should be propagated
1338 NumOfNonConstOps = 1;
1339 }
1340 NewCost += NumOfNonConstOps * TTI.getCastInstrCost(Instruction::Trunc,
1341 NewType, OldType,
1342 TargetTransformInfo::CastContextHint::None,
1343 TargetTransformInfo::TCK_RecipThroughput);
1344 // New cost of zext narrowed result to original type
1345 NewCost +=
1346 TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
1347 TargetTransformInfo::CastContextHint::None, TargetTransformInfo::TCK_RecipThroughput);
1348 if (NewCost >= OldCost)
1349 return false;
1350
1351 IRBuilder<> Builder(I);
1352 Value *Trunc0 = Builder.CreateTrunc(I->getOperand(0), NewType);
1353 Value *Trunc1 = Builder.CreateTrunc(I->getOperand(1), NewType);
1354 Value *Arith =
1355 Builder.CreateBinOp((Instruction::BinaryOps)Opc, Trunc0, Trunc1);
1356
1357 Value *Zext = Builder.CreateZExt(Arith, OldType);
1358 I->replaceAllUsesWith(Zext);
1359 DeadVals.push_back(I);
1360 return true;
1361}
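//
// Illustrative result of the narrowing above, assuming known bits prove the
// 64-bit add only ever produces a 32-bit value and the cost model agrees
// (value names are placeholders):
//
//   %r = add i64 %a, %b
//     -->
//   %a32 = trunc i64 %a to i32
//   %b32 = trunc i64 %b to i32
//   %r32 = add i32 %a32, %b32
//   %r   = zext i32 %r32 to i64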
1362
1363bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
1364 if (foldBinOpIntoSelect(I))
1365 return true;
1366
1367 if (UseMul24Intrin && replaceMulWithMul24(I))
1368 return true;
1369 if (tryNarrowMathIfNoOverflow(&I))
1370 return true;
1371
1372 bool Changed = false;
1373 Instruction::BinaryOps Opc = I.getOpcode();
1374 Type *Ty = I.getType();
1375 Value *NewDiv = nullptr;
1376 unsigned ScalarSize = Ty->getScalarSizeInBits();
1377
1378 SmallVector<BinaryOperator *, 8> Div64ToExpand;
1379
1380 if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
1381 Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
1382 ScalarSize <= 64 &&
1383 !DisableIDivExpand) {
1384 Value *Num = I.getOperand(0);
1385 Value *Den = I.getOperand(1);
1386 IRBuilder<> Builder(&I);
1387 Builder.SetCurrentDebugLocation(I.getDebugLoc());
1388
1389 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
1390 NewDiv = PoisonValue::get(VT);
1391
1392 for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
1393 Value *NumEltN = Builder.CreateExtractElement(Num, N);
1394 Value *DenEltN = Builder.CreateExtractElement(Den, N);
1395
1396 Value *NewElt;
1397 if (ScalarSize <= 32) {
1398 NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
1399 if (!NewElt)
1400 NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1401 } else {
1402 // See if this 64-bit division can be shrunk to 32/24-bits before
1403 // producing the general expansion.
1404 NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
1405 if (!NewElt) {
1406 // The general 64-bit expansion introduces control flow and doesn't
1407 // return the new value. Just insert a scalar copy and defer
1408 // expanding it.
1409 NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1410 // CreateBinOp does constant folding. If the operands are constant,
1411 // it will return a Constant instead of a BinaryOperator.
1412 if (auto *NewEltBO = dyn_cast<BinaryOperator>(NewElt))
1413 Div64ToExpand.push_back(NewEltBO);
1414 }
1415 }
1416
1417 if (auto *NewEltI = dyn_cast<Instruction>(NewElt))
1418 NewEltI->copyIRFlags(&I);
1419
1420 NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
1421 }
1422 } else {
1423 if (ScalarSize <= 32)
1424 NewDiv = expandDivRem32(Builder, I, Num, Den);
1425 else {
1426 NewDiv = shrinkDivRem64(Builder, I, Num, Den);
1427 if (!NewDiv)
1428 Div64ToExpand.push_back(&I);
1429 }
1430 }
1431
1432 if (NewDiv) {
1433 I.replaceAllUsesWith(NewDiv);
1434 DeadVals.push_back(&I);
1435 Changed = true;
1436 }
1437 }
1438
1439 if (ExpandDiv64InIR) {
1440 // TODO: We get much worse code in specially handled constant cases.
1441 for (BinaryOperator *Div : Div64ToExpand) {
1442 expandDivRem64(*Div);
1443 FlowChanged = true;
1444 Changed = true;
1445 }
1446 }
1447
1448 return Changed;
1449}
1450
1451bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
1452 if (!WidenLoads)
1453 return false;
1454
1455 if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
1456 I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
1457 canWidenScalarExtLoad(I)) {
1458 IRBuilder<> Builder(&I);
1459 Builder.SetCurrentDebugLocation(I.getDebugLoc());
1460
1461 Type *I32Ty = Builder.getInt32Ty();
1462 LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, I.getPointerOperand());
1463 WidenLoad->copyMetadata(I);
1464
1465 // If we have range metadata, we need to convert the type, and not make
1466 // assumptions about the high bits.
1467 if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
1468 ConstantInt *Lower =
1469 mdconst::extract<ConstantInt>(Range->getOperand(0));
1470
1471 if (Lower->isNullValue()) {
1472 WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
1473 } else {
1474 Metadata *LowAndHigh[] = {
1475 ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
1476 // Don't make assumptions about the high bits.
1477 ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
1478 };
1479
1480 WidenLoad->setMetadata(LLVMContext::MD_range,
1481 MDNode::get(F.getContext(), LowAndHigh));
1482 }
1483 }
1484
1485 int TySize = DL.getTypeSizeInBits(I.getType());
1486 Type *IntNTy = Builder.getIntNTy(TySize);
1487 Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
1488 Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
1489 I.replaceAllUsesWith(ValOrig);
1490 DeadVals.push_back(&I);
1491 return true;
1492 }
1493
1494 return false;
1495}
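//
// Sketch of the widening above for a uniform, sufficiently aligned sub-dword
// load from constant address space (names are illustrative):
//
//   %v = load i8, ptr addrspace(4) %p, align 4
//     -->
//   %w = load i32, ptr addrspace(4) %p, align 4
//   %v = trunc i32 %w to i8
//
// which lets the backend select a scalar load instead of a vector load.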
1496
1497bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
1498 Value *Cond = I.getCondition();
1499 Value *TrueVal = I.getTrueValue();
1500 Value *FalseVal = I.getFalseValue();
1501 Value *CmpVal;
1502 CmpPredicate Pred;
1503
1504 // Match fract pattern with nan check.
1505 if (!match(Cond, m_FCmp(Pred, m_Value(CmpVal), m_NonNaN())))
1506 return false;
1507
1508 FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&I);
1509 if (!FPOp)
1510 return false;
1511
1512 IRBuilder<> Builder(&I);
1513 Builder.setFastMathFlags(FPOp->getFastMathFlags());
1514
1515 auto *IITrue = dyn_cast<IntrinsicInst>(TrueVal);
1516 auto *IIFalse = dyn_cast<IntrinsicInst>(FalseVal);
1517
1518 Value *Fract = nullptr;
1519 if (Pred == FCmpInst::FCMP_UNO && TrueVal == CmpVal && IIFalse &&
1520 CmpVal == matchFractPat(*IIFalse)) {
1521 // isnan(x) ? x : fract(x)
1522 Fract = applyFractPat(Builder, CmpVal);
1523 } else if (Pred == FCmpInst::FCMP_ORD && FalseVal == CmpVal && IITrue &&
1524 CmpVal == matchFractPat(*IITrue)) {
1525 // !isnan(x) ? fract(x) : x
1526 Fract = applyFractPat(Builder, CmpVal);
1527 } else
1528 return false;
1529
1530 Fract->takeName(&I);
1531 I.replaceAllUsesWith(Fract);
1532 DeadVals.push_back(&I);
1533 return true;
1534}
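//
// The select pattern handled above is, roughly (names are illustrative):
//
//   %c = fcmp uno float %x, 0.0
//   %s = select i1 %c, float %x, float <fract pattern of %x>
//
// i.e. "isnan(x) ? x : fract(x)". Together with the matching ordered form it
// can be turned into a single llvm.amdgcn.fract call, assuming matchFractPat
// accepts the pattern, since the hardware fract already handles NaN inputs.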
1535
1536static bool areInSameBB(const Value *A, const Value *B) {
1537 const auto *IA = dyn_cast<Instruction>(A);
1538 const auto *IB = dyn_cast<Instruction>(B);
1539 return IA && IB && IA->getParent() == IB->getParent();
1540}
1541
1542// Helper for breaking large PHIs that returns true when an extractelement on V
1543// is likely to be folded away by the DAG combiner.
1544static bool isInterestingPHIIncomingValue(const Value *V) {
1545 const auto *FVT = dyn_cast<FixedVectorType>(V->getType());
1546 if (!FVT)
1547 return false;
1548
1549 const Value *CurVal = V;
1550
1551 // Check for insertelements, keeping track of the elements covered.
1552 BitVector EltsCovered(FVT->getNumElements());
1553 while (const auto *IE = dyn_cast<InsertElementInst>(CurVal)) {
1554 const auto *Idx = dyn_cast<ConstantInt>(IE->getOperand(2));
1555
1556 // Non constant index/out of bounds index -> folding is unlikely.
1557 // The latter is more of a sanity check because canonical IR should just
1558 // have replaced those with poison.
1559 if (!Idx || Idx->getZExtValue() >= FVT->getNumElements())
1560 return false;
1561
1562 const auto *VecSrc = IE->getOperand(0);
1563
1564 // If the vector source is another instruction, it must be in the same basic
1565 // block. Otherwise, the DAGCombiner won't see the whole thing and is
1566 // unlikely to be able to do anything interesting here.
1567 if (isa<Instruction>(VecSrc) && !areInSameBB(VecSrc, IE))
1568 return false;
1569
1570 CurVal = VecSrc;
1571 EltsCovered.set(Idx->getZExtValue());
1572
1573 // All elements covered.
1574 if (EltsCovered.all())
1575 return true;
1576 }
1577
1578 // We either didn't find a single insertelement, or the insertelement chain
1579 // ended before all elements were covered. Check for other interesting values.
1580
1581 // Constants are always interesting because we can just constant fold the
1582 // extractelements.
1583 if (isa<Constant>(CurVal))
1584 return true;
1585
1586 // shufflevector is likely to be profitable if either operand is a constant,
1587 // or if either source is in the same block.
1588 // This is because shufflevector is most often lowered as a series of
1589 // insert/extract elements anyway.
1590 if (const auto *SV = dyn_cast<ShuffleVectorInst>(CurVal)) {
1591 return isa<Constant>(SV->getOperand(1)) ||
1592 areInSameBB(SV, SV->getOperand(0)) ||
1593 areInSameBB(SV, SV->getOperand(1));
1594 }
1595
1596 return false;
1597}
1598
1599static void collectPHINodes(const PHINode &I,
1600 SmallPtrSetImpl<const PHINode *> &SeenPHIs) {
1601 const auto [It, Inserted] = SeenPHIs.insert(&I);
1602 if (!Inserted)
1603 return;
1604
1605 for (const Value *Inc : I.incoming_values()) {
1606 if (const auto *PhiInc = dyn_cast<PHINode>(Inc))
1607 collectPHINodes(*PhiInc, SeenPHIs);
1608 }
1609
1610 for (const User *U : I.users()) {
1611 if (const auto *PhiU = dyn_cast<PHINode>(U))
1612 collectPHINodes(*PhiU, SeenPHIs);
1613 }
1614}
1615
1616bool AMDGPUCodeGenPrepareImpl::canBreakPHINode(const PHINode &I) {
1617 // Check in the cache first.
1618 if (const auto It = BreakPhiNodesCache.find(&I);
1619 It != BreakPhiNodesCache.end())
1620 return It->second;
1621
1622 // We consider PHI nodes as part of "chains", so given a PHI node I, we
1623 // recursively consider all its users and incoming values that are also PHI
1624 // nodes. We then make a decision about all of those PHIs at once. Either they
1625 // all get broken up, or none of them do. That way, we avoid cases where a
1626 // single PHI is/is not broken and we end up reforming/exploding a vector
1627 // multiple times, or even worse, doing it in a loop.
1628 SmallPtrSet<const PHINode *, 8> WorkList;
1629 collectPHINodes(I, WorkList);
1630
1631#ifndef NDEBUG
1632 // Check that none of the PHI nodes in the worklist are in the map. If some of
1633 // them are, it means we're not good enough at collecting related PHIs.
1634 for (const PHINode *WLP : WorkList) {
1635 assert(BreakPhiNodesCache.count(WLP) == 0);
1636 }
1637#endif
1638
1639 // To consider a PHI profitable to break, we need to see some interesting
1640 // incoming values. At least 2/3rd (rounded up) of all PHIs in the worklist
1641 // must have one to consider all PHIs breakable.
1642 //
1643 // This threshold has been determined through performance testing.
1644 //
1645 // Note that the computation below is equivalent to
1646 //
1647 // (unsigned)ceil((K / 3.0) * 2)
1648 //
1649 // It's simply written this way to avoid mixing integral/FP arithmetic.
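 // For instance, with a worklist of 5 PHIs: alignTo(5 * 2, 3) = 12 and
 // 12 / 3 = 4, matching ceil((5 / 3.0) * 2) = 4 breakable PHIs required.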
1650 const auto Threshold = (alignTo(WorkList.size() * 2, 3) / 3);
1651 unsigned NumBreakablePHIs = 0;
1652 bool CanBreak = false;
1653 for (const PHINode *Cur : WorkList) {
1654 // Don't break PHIs that have no interesting incoming values. That is, where
1655 // there is no clear opportunity to fold the "extractelement" instructions
1656 // we would add.
1657 //
1658 // Note: IC does not run after this pass, so we're only interested in the
1659 // foldings that the DAG combiner can do.
1660 if (any_of(Cur->incoming_values(), isInterestingPHIIncomingValue)) {
1661 if (++NumBreakablePHIs >= Threshold) {
1662 CanBreak = true;
1663 break;
1664 }
1665 }
1666 }
1667
1668 for (const PHINode *Cur : WorkList)
1669 BreakPhiNodesCache[Cur] = CanBreak;
1670
1671 return CanBreak;
1672}
1673
1674/// Helper class for "break large PHIs" (visitPHINode).
1675///
1676/// This represents a slice of a PHI's incoming value, which is made up of:
1677/// - The type of the slice (Ty)
1678/// - The index in the incoming value's vector where the slice starts (Idx)
1679/// - The number of elements in the slice (NumElts).
1680/// It also keeps track of the NewPHI node inserted for this particular slice.
1681///
1682/// Slice examples:
1683/// <4 x i64> -> Split into four i64 slices.
1684/// -> [i64, 0, 1], [i64, 1, 1], [i64, 2, 1], [i64, 3, 1]
1685/// <5 x i16> -> Split into 2 <2 x i16> slices + an i16 tail.
1686/// -> [<2 x i16>, 0, 2], [<2 x i16>, 2, 2], [i16, 4, 1]
1687class VectorSlice {
1688public:
1689 VectorSlice(Type *Ty, unsigned Idx, unsigned NumElts)
1690 : Ty(Ty), Idx(Idx), NumElts(NumElts) {}
1691
1692 Type *Ty = nullptr;
1693 unsigned Idx = 0;
1694 unsigned NumElts = 0;
1695 PHINode *NewPHI = nullptr;
1696
1697 /// Slice \p Inc according to the information contained within this slice.
1698 /// This is cached, so if called multiple times for the same \p BB & \p Inc
1699 /// pair, it returns the same Sliced value as well.
1700 ///
1701 /// Note this *intentionally* does not return the same value for, say,
1702 /// [%bb.0, %0] & [%bb.1, %0] as:
1703  ///   - It could cause issues with dominance (e.g. if bb.1 is seen first, the
1704  ///     value created in bb.1 may not dominate bb.0 and so could not be reused
1705  ///     for bb.0's incoming edge).
1706 /// - We also want to make our extract instructions as local as possible so
1707 /// the DAG has better chances of folding them out. Duplicating them like
1708 /// that is beneficial in that regard.
1709 ///
1710  /// This is not only a minor optimization to avoid creating duplicate
1711  /// instructions, but also a requirement for correctness. It is not forbidden
1712 /// for a PHI node to have the same [BB, Val] pair multiple times. If we
1713 /// returned a new value each time, those previously identical pairs would all
1714 /// have different incoming values (from the same block) and it'd cause a "PHI
1715 /// node has multiple entries for the same basic block with different incoming
1716 /// values!" verifier error.
1717 Value *getSlicedVal(BasicBlock *BB, Value *Inc, StringRef NewValName) {
1718 Value *&Res = SlicedVals[{BB, Inc}];
1719 if (Res)
1720 return Res;
1721
1722    IRBuilder<> B(BB->getTerminator());
1723    if (Instruction *IncInst = dyn_cast<Instruction>(Inc))
1724 B.SetCurrentDebugLocation(IncInst->getDebugLoc());
1725
1726 if (NumElts > 1) {
1727      SmallVector<int, 4> Mask;
1728      for (unsigned K = Idx; K < (Idx + NumElts); ++K)
1729 Mask.push_back(K);
1730 Res = B.CreateShuffleVector(Inc, Mask, NewValName);
1731 } else
1732 Res = B.CreateExtractElement(Inc, Idx, NewValName);
1733
1734 return Res;
1735 }
1736
1737private:
1738  SmallDenseMap<std::pair<BasicBlock *, Value *>, Value *> SlicedVals;
1739};
1740
1741bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
1742  // Break up fixed-vector PHIs into smaller pieces.
1743  // The default threshold is 32, so any vector that's >32 bits is broken into
1744  // its elements, or into 32-bit pieces (for 8/16-bit elements).
1745 //
1746 // This is only helpful for DAGISel because it doesn't handle large PHIs as
1747 // well as GlobalISel. DAGISel lowers PHIs by using CopyToReg/CopyFromReg.
1748 // With large, odd-sized PHIs we may end up needing many `build_vector`
1749 // operations with most elements being "undef". This inhibits a lot of
1750 // optimization opportunities and can result in unreasonably high register
1751 // pressure and the inevitable stack spilling.
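  // As a hypothetical example (not from the source), with the default threshold
  // a PHI such as
  //   %v = phi <4 x i16> [ %a, %bb0 ], [ %b, %bb1 ]
  // is rebuilt below as two <2 x i16> PHIs whose incoming values are
  // shufflevector slices of %a and %b, followed by inserts that reform the
  // original <4 x i16> value for %v's users.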
1752 if (!BreakLargePHIs || getCGPassBuilderOption().EnableGlobalISelOption)
1753 return false;
1754
1755 FixedVectorType *FVT = dyn_cast<FixedVectorType>(I.getType());
1756 if (!FVT || FVT->getNumElements() == 1 ||
1757 DL.getTypeSizeInBits(FVT) <= BreakLargePHIsThreshold)
1758 return false;
1759
1760 if (!ForceBreakLargePHIs && !canBreakPHINode(I))
1761 return false;
1762
1763 std::vector<VectorSlice> Slices;
1764
1765 Type *EltTy = FVT->getElementType();
1766 {
1767 unsigned Idx = 0;
1768    // For 8/16-bit element types, don't scalarize fully; break the vector into
1769    // as many 32-bit slices as we can, and scalarize the tail.
1770 const unsigned EltSize = DL.getTypeSizeInBits(EltTy);
1771 const unsigned NumElts = FVT->getNumElements();
1772 if (EltSize == 8 || EltSize == 16) {
1773 const unsigned SubVecSize = (32 / EltSize);
1774 Type *SubVecTy = FixedVectorType::get(EltTy, SubVecSize);
1775 for (unsigned End = alignDown(NumElts, SubVecSize); Idx < End;
1776 Idx += SubVecSize)
1777 Slices.emplace_back(SubVecTy, Idx, SubVecSize);
1778 }
1779
1780 // Scalarize all remaining elements.
1781 for (; Idx < NumElts; ++Idx)
1782 Slices.emplace_back(EltTy, Idx, 1);
1783 }
1784
1785 assert(Slices.size() > 1);
1786
1787 // Create one PHI per vector piece. The "VectorSlice" class takes care of
1788 // creating the necessary instruction to extract the relevant slices of each
1789 // incoming value.
1790 IRBuilder<> B(I.getParent());
1791 B.SetCurrentDebugLocation(I.getDebugLoc());
1792
1793 unsigned IncNameSuffix = 0;
1794 for (VectorSlice &S : Slices) {
1795    // We need to reset the builder's insert point on each iteration, because
1796    // getSlicedVal may have inserted something into I's BB.
1797 B.SetInsertPoint(I.getParent()->getFirstNonPHIIt());
1798 S.NewPHI = B.CreatePHI(S.Ty, I.getNumIncomingValues());
1799
1800 for (const auto &[Idx, BB] : enumerate(I.blocks())) {
1801 S.NewPHI->addIncoming(S.getSlicedVal(BB, I.getIncomingValue(Idx),
1802 "largephi.extractslice" +
1803 std::to_string(IncNameSuffix++)),
1804 BB);
1805 }
1806 }
1807
1808 // And replace this PHI with a vector of all the previous PHI values.
1809 Value *Vec = PoisonValue::get(FVT);
1810 unsigned NameSuffix = 0;
1811 for (VectorSlice &S : Slices) {
1812 const auto ValName = "largephi.insertslice" + std::to_string(NameSuffix++);
1813 if (S.NumElts > 1)
1814 Vec = B.CreateInsertVector(FVT, Vec, S.NewPHI, S.Idx, ValName);
1815 else
1816 Vec = B.CreateInsertElement(Vec, S.NewPHI, S.Idx, ValName);
1817 }
1818
1819 I.replaceAllUsesWith(Vec);
1820 DeadVals.push_back(&I);
1821 return true;
1822}
1823
1824/// \param V Value to check
1825/// \param DL DataLayout
1826/// \param TM TargetMachine (TODO: remove once DL contains nullptr values)
1827/// \param AS Target Address Space
1828/// \return true if \p V cannot be the null value of \p AS, false otherwise.
1829static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL,
1830 const AMDGPUTargetMachine &TM, unsigned AS) {
1831 // Pointer cannot be null if it's a block address, GV or alloca.
1832 // NOTE: We don't support extern_weak, but if we did, we'd need to check for
1833 // it as the symbol could be null in such cases.
1834  if (isa<BlockAddress, GlobalValue, AllocaInst>(V))
1835    return true;
1836
1837 // Check nonnull arguments.
1838 if (const auto *Arg = dyn_cast<Argument>(V); Arg && Arg->hasNonNullAttr())
1839 return true;
1840
1841 // Check nonnull loads.
1842 if (const auto *Load = dyn_cast<LoadInst>(V);
1843 Load && Load->hasMetadata(LLVMContext::MD_nonnull))
1844 return true;
1845
1846  // getUnderlyingObject may have looked through another addrspacecast, although
1847  // such optimizable situations have most likely been folded out by now.
1848 if (AS != cast<PointerType>(V->getType())->getAddressSpace())
1849 return false;
1850
1851 // TODO: Calls that return nonnull?
1852
1853 // For all other things, use KnownBits.
1854 // We either use 0 or all bits set to indicate null, so check whether the
1855 // value can be zero or all ones.
1856 //
1857 // TODO: Use ValueTracking's isKnownNeverNull if it becomes aware that some
1858 // address spaces have non-zero null values.
1859 auto SrcPtrKB = computeKnownBits(V, DL);
1860 const auto NullVal = TM.getNullPointerValue(AS);
1861
1862 assert(SrcPtrKB.getBitWidth() == DL.getPointerSizeInBits(AS));
1863 assert((NullVal == 0 || NullVal == -1) &&
1864 "don't know how to check for this null value!");
1865 return NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero();
1866}
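// For context (a note assuming the usual AMDGPU convention, not text from the
// source): the local/private address spaces use an all-ones (-1) null value,
// while flat/global use 0, so the check above must rule out whichever encoding
// the target reports via getNullPointerValue.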
1867
1868bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
1869  // The intrinsic doesn't support vectors; it also seems difficult to prove
1870  // that a vector cannot contain any null elements, so it's unclear whether
1871  // supporting vectors would be worthwhile.
1872 if (I.getType()->isVectorTy())
1873 return false;
1874
1875  // Check if this can be lowered to an amdgcn.addrspacecast.nonnull.
1876  // This is only worthwhile for casts between private/local and flat.
1877 const unsigned SrcAS = I.getSrcAddressSpace();
1878 const unsigned DstAS = I.getDestAddressSpace();
1879
1880 bool CanLower = false;
1881 if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
1882 CanLower = (DstAS == AMDGPUAS::LOCAL_ADDRESS ||
1883 DstAS == AMDGPUAS::PRIVATE_ADDRESS);
1884 else if (DstAS == AMDGPUAS::FLAT_ADDRESS)
1885 CanLower = (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
1886 SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
1887 if (!CanLower)
1888 return false;
1889
1890  SmallVector<const Value *, 4> WorkList;
1891  getUnderlyingObjects(I.getOperand(0), WorkList);
1892 if (!all_of(WorkList, [&](const Value *V) {
1893 return isPtrKnownNeverNull(V, DL, TM, SrcAS);
1894 }))
1895 return false;
1896
1897 IRBuilder<> B(&I);
1898 auto *Intrin = B.CreateIntrinsic(
1899 I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
1900 I.replaceAllUsesWith(Intrin);
1901 DeadVals.push_back(&I);
1902 return true;
1903}
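// A rough before/after sketch of the rewrite above (hypothetical IR; the actual
// intrinsic name carries the mangled pointer types):
//   %flat = addrspacecast ptr addrspace(5) %p to ptr
// becomes, once %p is known to be non-null, a call to
//   @llvm.amdgcn.addrspacecast.nonnull
// which lets codegen skip the null-pointer special case that a plain
// private/local-to-flat addrspacecast would otherwise need.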
1904
1905bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
1906 switch (I.getIntrinsicID()) {
1907 case Intrinsic::minnum:
1908 case Intrinsic::minimumnum:
1909 case Intrinsic::minimum:
1910 return visitFMinLike(I);
1911 case Intrinsic::sqrt:
1912 return visitSqrt(I);
1913 default:
1914 return false;
1915 }
1916}
1917
1918/// Match non-nan fract pattern.
1919/// minnum(fsub(x, floor(x)), nextafter(1.0, -1.0))
1920/// minimumnum(fsub(x, floor(x)), nextafter(1.0, -1.0))
1921/// minimum(fsub(x, floor(x)), nextafter(1.0, -1.0))
1922///
1923/// Matches only if fract is a useful instruction for the subtarget. Does not
1924/// account for nan handling; the instruction has a nan check on the input value.
1925Value *AMDGPUCodeGenPrepareImpl::matchFractPat(IntrinsicInst &I) {
1926 if (ST.hasFractBug())
1927 return nullptr;
1928
1929 Intrinsic::ID IID = I.getIntrinsicID();
1930
1931 // The value is only used in contexts where we know the input isn't a nan, so
1932 // any of the fmin variants are fine.
1933 if (IID != Intrinsic::minnum && IID != Intrinsic::minimum &&
1934 IID != Intrinsic::minimumnum)
1935 return nullptr;
1936
1937 Type *Ty = I.getType();
1938 if (!isLegalFloatingTy(Ty->getScalarType()))
1939 return nullptr;
1940
1941 Value *Arg0 = I.getArgOperand(0);
1942 Value *Arg1 = I.getArgOperand(1);
1943
1944 const APFloat *C;
1945 if (!match(Arg1, m_APFloat(C)))
1946 return nullptr;
1947
1948 APFloat One(1.0);
1949 bool LosesInfo;
1950 One.convert(C->getSemantics(), APFloat::rmNearestTiesToEven, &LosesInfo);
1951
1952 // Match nextafter(1.0, -1)
1953 One.next(true);
1954 if (One != *C)
1955 return nullptr;
1956
1957 Value *FloorSrc;
1958 if (match(Arg0, m_FSub(m_Value(FloorSrc),
1959                         m_Intrinsic<Intrinsic::floor>(m_Deferred(FloorSrc)))))
1960    return FloorSrc;
1961 return nullptr;
1962}
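// For illustration (a sketch of the matched IR, not from the source), the f32
// form of the pattern looks like:
//   %floor = call float @llvm.floor.f32(float %x)
//   %sub   = fsub float %x, %floor
//   %min   = call float @llvm.minnum.f32(float %sub, float 0x3FEFFFFFE0000000)
// where the constant is nextafter(1.0f, -1.0f); visitFMinLike then replaces the
// whole sequence with llvm.amdgcn.fract on %x.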
1963
1964Value *AMDGPUCodeGenPrepareImpl::applyFractPat(IRBuilder<> &Builder,
1965 Value *FractArg) {
1966 SmallVector<Value *, 4> FractVals;
1967 extractValues(Builder, FractVals, FractArg);
1968
1969 SmallVector<Value *, 4> ResultVals(FractVals.size());
1970
1971 Type *Ty = FractArg->getType()->getScalarType();
1972 for (unsigned I = 0, E = FractVals.size(); I != E; ++I) {
1973 ResultVals[I] =
1974 Builder.CreateIntrinsic(Intrinsic::amdgcn_fract, {Ty}, {FractVals[I]});
1975 }
1976
1977 return insertValues(Builder, FractArg->getType(), ResultVals);
1978}
1979
1980bool AMDGPUCodeGenPrepareImpl::visitFMinLike(IntrinsicInst &I) {
1981 Value *FractArg = matchFractPat(I);
1982 if (!FractArg)
1983 return false;
1984
1985  // Match the fract pattern in contexts where the nan check has been optimized
1986  // out (and hope the knowledge that the source can't be nan wasn't lost).
1987 if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, SimplifyQuery(DL, TLI)))
1988 return false;
1989
1990 IRBuilder<> Builder(&I);
1991 FastMathFlags FMF = I.getFastMathFlags();
1992 FMF.setNoNaNs();
1993 Builder.setFastMathFlags(FMF);
1994
1995 Value *Fract = applyFractPat(Builder, FractArg);
1996 Fract->takeName(&I);
1997 I.replaceAllUsesWith(Fract);
1998 DeadVals.push_back(&I);
1999 return true;
2000}
2001
2002// Expand llvm.sqrt.f32 calls with !fpmath metadata in a semi-fast way.
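// For example (an illustrative call site, not from the source):
//   %r = call float @llvm.sqrt.f32(float %x), !fpmath !0   ; !0 = !{float 2.500000e+00}
// requests 2.5 ulp, which is loose enough for the 2-ulp (or DAZ fast-path)
// expansion below, whereas a call without !fpmath, or one demanding < 1 ulp, is
// left to the correctly rounded codegen expansion.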
2003bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
2004 Type *Ty = Sqrt.getType()->getScalarType();
2005 if (!Ty->isFloatTy() && (!Ty->isHalfTy() || ST.has16BitInsts()))
2006 return false;
2007
2008 const FPMathOperator *FPOp = cast<const FPMathOperator>(&Sqrt);
2009 FastMathFlags SqrtFMF = FPOp->getFastMathFlags();
2010
2011 // We're trying to handle the fast-but-not-that-fast case only. The lowering
2012 // of fast llvm.sqrt will give the raw instruction anyway.
2013 if (SqrtFMF.approxFunc())
2014 return false;
2015
2016 const float ReqdAccuracy = FPOp->getFPAccuracy();
2017
2018 // Defer correctly rounded expansion to codegen.
2019 if (ReqdAccuracy < 1.0f)
2020 return false;
2021
2022 Value *SrcVal = Sqrt.getOperand(0);
2023 bool CanTreatAsDAZ = canIgnoreDenormalInput(SrcVal, &Sqrt);
2024
2025 // The raw instruction is 1 ulp, but the correction for denormal handling
2026 // brings it to 2.
2027 if (!CanTreatAsDAZ && ReqdAccuracy < 2.0f)
2028 return false;
2029
2030 IRBuilder<> Builder(&Sqrt);
2031  SmallVector<Value *, 4> SrcVals;
2032  extractValues(Builder, SrcVals, SrcVal);
2033
2034 SmallVector<Value *, 4> ResultVals(SrcVals.size());
2035 for (int I = 0, E = SrcVals.size(); I != E; ++I) {
2036 if (CanTreatAsDAZ)
2037 ResultVals[I] = Builder.CreateCall(getSqrtF32(), SrcVals[I]);
2038 else
2039 ResultVals[I] = emitSqrtIEEE2ULP(Builder, SrcVals[I], SqrtFMF);
2040 }
2041
2042 Value *NewSqrt = insertValues(Builder, Sqrt.getType(), ResultVals);
2043 NewSqrt->takeName(&Sqrt);
2044 Sqrt.replaceAllUsesWith(NewSqrt);
2045 DeadVals.push_back(&Sqrt);
2046 return true;
2047}
2048
2049bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
2050 if (skipFunction(F))
2051 return false;
2052
2053 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
2054 if (!TPC)
2055 return false;
2056
2057 const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
2058 const TargetLibraryInfo *TLI =
2059 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2060 AssumptionCache *AC =
2061 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2062 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
2063 const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
2064 const UniformityInfo &UA =
2065 getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
2066 return AMDGPUCodeGenPrepareImpl(F, TM, TLI, AC, DT, UA).run();
2067}
2068
2069PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
2070                                                FunctionAnalysisManager &FAM) {
2071  const AMDGPUTargetMachine &ATM = static_cast<const AMDGPUTargetMachine &>(TM);
2072 const TargetLibraryInfo *TLI = &FAM.getResult<TargetLibraryAnalysis>(F);
2073 AssumptionCache *AC = &FAM.getResult<AssumptionAnalysis>(F);
2074 const DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
2075 const UniformityInfo &UA = FAM.getResult<UniformityInfoAnalysis>(F);
2076 AMDGPUCodeGenPrepareImpl Impl(F, ATM, TLI, AC, DT, UA);
2077 if (!Impl.run())
2078 return PreservedAnalyses::all();
2079  PreservedAnalyses PA = PreservedAnalyses::none();
2080  if (!Impl.FlowChanged)
2081    PA.preserveSet<CFGAnalyses>();
2082  return PA;
2083}
2084
2085INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
2086 "AMDGPU IR optimizations", false, false)
2087INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2088INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
2089INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2090INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
2091                    false, false)
2092
2093char AMDGPUCodeGenPrepare::ID = 0;
2094
2095FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
2096  return new AMDGPUCodeGenPrepare();
2097}
Returns the smallest (by magnitude) normalized finite number in the given semantics.
Definition APFloat.h:1158
This class represents a conversion between pointers from one address space to another.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
BitVector & set()
Definition BitVector.h:370
bool all() const
all - Returns true if all bits are set.
Definition BitVector.h:194
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
LLVM_ABI bool isExactlyValue(const APFloat &V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Definition Operator.h:333
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
void setFast(bool B=true)
Definition FMF.h:96
bool noInfs() const
Definition FMF.h:66
bool allowReciprocal() const
Definition FMF.h:68
bool approxFunc() const
Definition FMF.h:70
void setNoNaNs(bool B=true)
Definition FMF.h:78
bool noNaNs() const
Definition FMF.h:65
bool allowContract() const
Definition FMF.h:69
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:803
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
bool hasFractBug() const
bool isUniform(ConstValueRefT V) const
Whether V is uniform/non-divergent.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2574
Value * CreateFDiv(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition IRBuilder.h:1670
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2158
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2562
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition IRBuilder.h:575
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition IRBuilder.h:2103
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2621
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFPToUI(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2131
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2097
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2145
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:345
Value * CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2387
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition IRBuilder.h:247
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition IRBuilder.h:1784
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2207
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1850
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1492
FastMathFlags getFastMathFlags() const
Get the flags to be applied to created floating point ops.
Definition IRBuilder.h:334
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2085
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
Type * getFloatTy()
Fetch the type representing a 32-bit floating point value.
Definition IRBuilder.h:590
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2511
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2071
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1708
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2344
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1532
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1599
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition IRBuilder.h:1651
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1793
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
Value * CreateSExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a SExt or Trunc from the integer value V to DestTy.
Definition IRBuilder.h:2118
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
Value * CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2382
Value * CreateFPToSI(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2138
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2783
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
@ TCK_RecipThroughput
Reciprocal throughput.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:298
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition Type.h:153
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition Type.h:142
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition Type.h:156
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:107
Analysis pass which computes UniformityInfo.
Legacy analysis pass which computes a CycleInfo.
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Type * getElementType() const
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition ISDOpcodes.h:515
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cstfp_pred_ty< is_nonnan > m_NonNaN()
Match a non-NaN FP constant.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:533
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2474
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
LLVM_ABI bool expandRemainderUpTo64Bits(BinaryOperator *Rem)
Generate code to calculate the remainder of two integers, replacing Rem with the generated code.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:557
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
Definition bit.h:331
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1734
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI bool expandDivisionUpTo64Bits(BinaryOperator *Div)
Generate code to divide two integers, replacing Div with the generated code.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
FunctionPass * createAMDGPUCodeGenPreparePass()
To bit_cast(const From &from) noexcept
Definition bit.h:90
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
#define N
static constexpr DenormalMode getPreserveSign()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.