1//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This pass does misc. AMDGPU optimizations on IR before instruction
11/// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPU.h"
16#include "AMDGPUTargetMachine.h"
24#include "llvm/IR/Dominators.h"
25#include "llvm/IR/IRBuilder.h"
26#include "llvm/IR/InstVisitor.h"
27#include "llvm/IR/IntrinsicsAMDGPU.h"
30#include "llvm/Pass.h"
34
35#define DEBUG_TYPE "amdgpu-codegenprepare"
36
37using namespace llvm;
38using namespace llvm::PatternMatch;
39
40namespace {
41
43 "amdgpu-codegenprepare-widen-constant-loads",
44 cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
46 cl::init(false));
47
48static cl::opt<bool> Widen16BitOps(
49 "amdgpu-codegenprepare-widen-16-bit-ops",
50 cl::desc("Widen uniform 16-bit instructions to 32-bit in AMDGPUCodeGenPrepare"),
52 cl::init(true));
53
54static cl::opt<bool>
55 BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
56 cl::desc("Break large PHI nodes for DAGISel"),
57 cl::ReallyHidden, cl::init(true));
58
59static cl::opt<bool>
60 ForceBreakLargePHIs("amdgpu-codegenprepare-force-break-large-phis",
61 cl::desc("For testing purposes, always break large "
62 "PHIs even if it isn't profitable."),
63 cl::ReallyHidden, cl::init(false));
64
65static cl::opt<unsigned> BreakLargePHIsThreshold(
66 "amdgpu-codegenprepare-break-large-phis-threshold",
67 cl::desc("Minimum type size in bits for breaking large PHI nodes"),
68 cl::ReallyHidden, cl::init(32));
69
70static cl::opt<bool> UseMul24Intrin(
71 "amdgpu-codegenprepare-mul24",
72 cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
74 cl::init(true));
75
76// Legalize 64-bit division by using the generic IR expansion.
77static cl::opt<bool> ExpandDiv64InIR(
78 "amdgpu-codegenprepare-expand-div64",
79 cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
81 cl::init(false));
82
83// Leave all division operations as they are. This supersedes ExpandDiv64InIR
84// and is used for testing the legalizer.
85static cl::opt<bool> DisableIDivExpand(
86 "amdgpu-codegenprepare-disable-idiv-expansion",
87 cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
89 cl::init(false));
90
91// Disable processing of fdiv so we can better test the backend implementations.
92static cl::opt<bool> DisableFDivExpand(
93 "amdgpu-codegenprepare-disable-fdiv-expansion",
94 cl::desc("Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
96 cl::init(false));
97
98class AMDGPUCodeGenPrepareImpl
99 : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
100public:
101 const GCNSubtarget *ST = nullptr;
102 const TargetLibraryInfo *TLInfo = nullptr;
103 AssumptionCache *AC = nullptr;
104 DominatorTree *DT = nullptr;
105 UniformityInfo *UA = nullptr;
106 Module *Mod = nullptr;
107 const DataLayout *DL = nullptr;
108 bool HasUnsafeFPMath = false;
109 bool HasFP32DenormalFlush = false;
110 bool FlowChanged = false;
111 mutable Function *SqrtF32 = nullptr;
112 mutable Function *LdexpF32 = nullptr;
113
114 DenseMap<const PHINode *, bool> BreakPhiNodesCache;
115
116 Function *getSqrtF32() const {
117 if (SqrtF32)
118 return SqrtF32;
119
120 LLVMContext &Ctx = Mod->getContext();
121 SqrtF32 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_sqrt,
122 {Type::getFloatTy(Ctx)});
123 return SqrtF32;
124 }
125
126 Function *getLdexpF32() const {
127 if (LdexpF32)
128 return LdexpF32;
129
130 LLVMContext &Ctx = Mod->getContext();
131 LdexpF32 = Intrinsic::getDeclaration(
132 Mod, Intrinsic::ldexp, {Type::getFloatTy(Ctx), Type::getInt32Ty(Ctx)});
133 return LdexpF32;
134 }
135
136 bool canBreakPHINode(const PHINode &I);
137
142 /// \returns \p T's base element bit width.
143 unsigned getBaseElementBitWidth(const Type *T) const;
144
145 /// \returns Equivalent 32 bit integer type for given type \p T. For example,
146 /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
147 /// is returned.
148 Type *getI32Ty(IRBuilder<> &B, const Type *T) const;
149
150 /// \returns True if binary operation \p I is a signed binary operation, false
151 /// otherwise.
152 bool isSigned(const BinaryOperator &I) const;
153
154 /// \returns True if the condition of 'select' operation \p I comes from a
155 /// signed 'icmp' operation, false otherwise.
156 bool isSigned(const SelectInst &I) const;
157
158 /// \returns True if type \p T needs to be promoted to 32 bit integer type,
159 /// false otherwise.
160 bool needsPromotionToI32(const Type *T) const;
161
162 /// Return true if \p T is a legal scalar floating point type.
163 bool isLegalFloatingTy(const Type *T) const;
164
165 /// Wrapper to pass all the arguments to computeKnownFPClass
166 KnownFPClass computeKnownFPClass(const Value *V, FPClassTest Interested,
167 const Instruction *CtxI) const {
168 return llvm::computeKnownFPClass(V, *DL, Interested, 0, TLInfo, AC, CtxI,
169 DT);
170 }
171
172 bool canIgnoreDenormalInput(const Value *V, const Instruction *CtxI) const {
173 return HasFP32DenormalFlush ||
174 computeKnownFPClass(V, fcSubnormal, CtxI).isKnownNeverSubnormal();
175 }
176
177 /// Promotes uniform binary operation \p I to equivalent 32 bit binary
178 /// operation.
179 ///
180 /// \details \p I's base element bit width must be greater than 1 and less
181 /// than or equal to 16. Promotion is done by sign or zero extending operands to
182 /// 32 bits, replacing \p I with equivalent 32 bit binary operation, and
183 /// truncating the result of 32 bit binary operation back to \p I's original
184 /// type. Division operation is not promoted.
185 ///
186 /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
187 /// false otherwise.
188 bool promoteUniformOpToI32(BinaryOperator &I) const;
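  // Illustrative example (hypothetical IR): a uniform 16-bit add such as
  //   %r = add i16 %a, %b
  // is rewritten roughly as
  //   %a32 = zext i16 %a to i32
  //   %b32 = zext i16 %b to i32
  //   %r32 = add nuw nsw i32 %a32, %b32
  //   %r   = trunc i32 %r32 to i16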
189
190 /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
191 ///
192 /// \details \p I's base element bit width must be greater than 1 and less
193 /// than or equal to 16. Promotion is done by sign or zero extending operands to
194 /// 32 bits, and replacing \p I with 32 bit 'icmp' operation.
195 ///
196 /// \returns True.
197 bool promoteUniformOpToI32(ICmpInst &I) const;
198
199 /// Promotes uniform 'select' operation \p I to 32 bit 'select'
200 /// operation.
201 ///
202 /// \details \p I's base element bit width must be greater than 1 and less
203 /// than or equal to 16. Promotion is done by sign or zero extending operands to
204 /// 32 bits, replacing \p I with 32 bit 'select' operation, and truncating the
205 /// result of 32 bit 'select' operation back to \p I's original type.
206 ///
207 /// \returns True.
208 bool promoteUniformOpToI32(SelectInst &I) const;
209
210 /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
211 /// intrinsic.
212 ///
213 /// \details \p I's base element bit width must be greater than 1 and less
214 /// than or equal to 16. Promotion is done by zero extending the operand to 32
215 /// bits, replacing \p I with 32 bit 'bitreverse' intrinsic, shifting the
216 /// result of 32 bit 'bitreverse' intrinsic to the right with zero fill (the
217 /// shift amount is 32 minus \p I's base element bit width), and truncating
218 /// the result of the shift operation back to \p I's original type.
219 ///
220 /// \returns True.
221 bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;
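  // Illustrative example (hypothetical IR) for a uniform i16 input:
  //   %x32 = zext i16 %x to i32
  //   %r32 = call i32 @llvm.bitreverse.i32(i32 %x32)
  //   %s   = lshr i32 %r32, 16        ; 32 minus the 16-bit element width
  //   %r   = trunc i32 %s to i16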
222
223 /// \returns The minimum number of bits needed to store the value of \p Op as an
224 /// unsigned integer. Truncating to this size and then zero-extending to
225 /// the original size will not change the value.
226 unsigned numBitsUnsigned(Value *Op) const;
227
228 /// \returns The minimum number of bits needed to store the value of \p Op as a
229 /// signed integer. Truncating to this size and then sign-extending to
230 /// the original size will not change the value.
231 unsigned numBitsSigned(Value *Op) const;
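  // For example, a value known to lie in [0, 1000] needs 10 bits as an
  // unsigned integer, while -1 needs only a single bit as a signed integer.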
232
233 /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
234 /// SelectionDAG has an issue where an 'and' asserting which bits are known can obscure this pattern, so do it on IR.
235 bool replaceMulWithMul24(BinaryOperator &I) const;
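  // Illustrative example: a divergent 'mul i32 %a, %b' whose operands are both
  // known to fit in 24 bits (e.g. both masked with 0xFFFFFF) can be replaced
  // with a call to the amdgcn.mul.u24 intrinsic, or amdgcn.mul.i24 when the
  // signed bit counts fit instead.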
236
237 /// Perform the same fold as the equivalently named function in DAGCombiner. Since
238 /// we expand some divisions here, we need to perform this before that expansion obscures the select pattern.
239 bool foldBinOpIntoSelect(BinaryOperator &I) const;
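  // Illustrative example (hypothetical IR):
  //   %s = select i1 %c, i32 8, i32 16
  //   %r = udiv i32 %s, 4
  // folds to
  //   %r = select i1 %c, i32 2, i32 4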
240
241 bool divHasSpecialOptimization(BinaryOperator &I,
242 Value *Num, Value *Den) const;
243 int getDivNumBits(BinaryOperator &I,
244 Value *Num, Value *Den,
245 unsigned AtLeast, bool Signed) const;
246
247 /// Expands 24 bit div or rem.
248 Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
249 Value *Num, Value *Den,
250 bool IsDiv, bool IsSigned) const;
251
252 Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
253 Value *Num, Value *Den, unsigned NumBits,
254 bool IsDiv, bool IsSigned) const;
255
256 /// Expands 32 bit div or rem.
257 Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
258 Value *Num, Value *Den) const;
259
260 Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
261 Value *Num, Value *Den) const;
262 void expandDivRem64(BinaryOperator &I) const;
263
264 /// Widen a scalar load.
265 ///
266 /// \details Widen a uniform, sub-dword load from constant memory to a full
267 /// 32 bits, then truncate the result, so that a scalar load can be selected
268 /// instead of a vector load.
269 ///
270 /// \returns True.
271
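  // Illustrative example (hypothetical IR): a uniform
  //   %v = load i8, ptr addrspace(4) %p, align 4
  // is widened to
  //   %w = load i32, ptr addrspace(4) %p, align 4
  //   %v = trunc i32 %w to i8
  // so that a scalar load can be selected instead of a vector load.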
272 bool canWidenScalarExtLoad(LoadInst &I) const;
273
274 Value *matchFractPat(IntrinsicInst &I);
275 Value *applyFractPat(IRBuilder<> &Builder, Value *FractArg);
276
277 bool canOptimizeWithRsq(const FPMathOperator *SqrtOp, FastMathFlags DivFMF,
278 FastMathFlags SqrtFMF) const;
279
280 Value *optimizeWithRsq(IRBuilder<> &Builder, Value *Num, Value *Den,
281 FastMathFlags DivFMF, FastMathFlags SqrtFMF,
282 const Instruction *CtxI) const;
283
284 Value *optimizeWithRcp(IRBuilder<> &Builder, Value *Num, Value *Den,
285 FastMathFlags FMF, const Instruction *CtxI) const;
286 Value *optimizeWithFDivFast(IRBuilder<> &Builder, Value *Num, Value *Den,
287 float ReqdAccuracy) const;
288
289 Value *visitFDivElement(IRBuilder<> &Builder, Value *Num, Value *Den,
290 FastMathFlags DivFMF, FastMathFlags SqrtFMF,
291 Value *RsqOp, const Instruction *FDiv,
292 float ReqdAccuracy) const;
293
294 std::pair<Value *, Value *> getFrexpResults(IRBuilder<> &Builder,
295 Value *Src) const;
296
297 Value *emitRcpIEEE1ULP(IRBuilder<> &Builder, Value *Src,
298 bool IsNegative) const;
299 Value *emitFrexpDiv(IRBuilder<> &Builder, Value *LHS, Value *RHS,
300 FastMathFlags FMF) const;
301 Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src,
302 FastMathFlags FMF) const;
303
304public:
305 bool visitFDiv(BinaryOperator &I);
306
307 bool visitInstruction(Instruction &I) { return false; }
308 bool visitBinaryOperator(BinaryOperator &I);
309 bool visitLoadInst(LoadInst &I);
310 bool visitICmpInst(ICmpInst &I);
311 bool visitSelectInst(SelectInst &I);
312 bool visitPHINode(PHINode &I);
313
314 bool visitIntrinsicInst(IntrinsicInst &I);
315 bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
316 bool visitMinNum(IntrinsicInst &I);
317 bool visitSqrt(IntrinsicInst &I);
318 bool run(Function &F);
319};
320
321class AMDGPUCodeGenPrepare : public FunctionPass {
322private:
323 AMDGPUCodeGenPrepareImpl Impl;
324
325public:
326 static char ID;
327 AMDGPUCodeGenPrepare() : FunctionPass(ID) {
328 initializeAMDGPUCodeGenPreparePass(*PassRegistry::getPassRegistry());
329 }
330 void getAnalysisUsage(AnalysisUsage &AU) const override {
331 AU.addRequired<AssumptionCacheTracker>();
332 AU.addRequired<UniformityInfoWrapperPass>();
333 AU.addRequired<TargetLibraryInfoWrapperPass>();
334
335 // FIXME: Division expansion needs to preserve the dominator tree.
336 if (!ExpandDiv64InIR)
337 AU.setPreservesAll();
338 }
339 bool runOnFunction(Function &F) override;
340 bool doInitialization(Module &M) override;
341 StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
342};
343
344} // end anonymous namespace
345
346bool AMDGPUCodeGenPrepareImpl::run(Function &F) {
347 BreakPhiNodesCache.clear();
348 bool MadeChange = false;
349
350 Function::iterator NextBB;
351 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
352 BasicBlock *BB = &*FI;
353 NextBB = std::next(FI);
354
356 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
357 I = Next) {
358 Next = std::next(I);
359
360 MadeChange |= visit(*I);
361
362 if (Next != E) { // Control flow changed
363 BasicBlock *NextInstBB = Next->getParent();
364 if (NextInstBB != BB) {
365 BB = NextInstBB;
366 E = BB->end();
367 FE = F.end();
368 }
369 }
370 }
371 }
372 return MadeChange;
373}
374
375unsigned AMDGPUCodeGenPrepareImpl::getBaseElementBitWidth(const Type *T) const {
376 assert(needsPromotionToI32(T) && "T does not need promotion to i32");
377
378 if (T->isIntegerTy())
379 return T->getIntegerBitWidth();
380 return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
381}
382
383Type *AMDGPUCodeGenPrepareImpl::getI32Ty(IRBuilder<> &B, const Type *T) const {
384 assert(needsPromotionToI32(T) && "T does not need promotion to i32");
385
386 if (T->isIntegerTy())
387 return B.getInt32Ty();
388 return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
389}
390
391bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const {
392 return I.getOpcode() == Instruction::AShr ||
393 I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
394}
395
396bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const {
397 return isa<ICmpInst>(I.getOperand(0)) ?
398 cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
399}
400
401bool AMDGPUCodeGenPrepareImpl::needsPromotionToI32(const Type *T) const {
402 if (!Widen16BitOps)
403 return false;
404
405 const IntegerType *IntTy = dyn_cast<IntegerType>(T);
406 if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
407 return true;
408
409 if (const VectorType *VT = dyn_cast<VectorType>(T)) {
410 // TODO: The set of packed operations is more limited, so may want to
411 // promote some anyway.
412 if (ST->hasVOP3PInsts())
413 return false;
414
415 return needsPromotionToI32(VT->getElementType());
416 }
417
418 return false;
419}
420
421bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const {
422 return Ty->isFloatTy() || Ty->isDoubleTy() ||
423 (Ty->isHalfTy() && ST->has16BitInsts());
424}
425
426// Return true if the op promoted to i32 should have nsw set.
427static bool promotedOpIsNSW(const Instruction &I) {
428 switch (I.getOpcode()) {
429 case Instruction::Shl:
430 case Instruction::Add:
431 case Instruction::Sub:
432 return true;
433 case Instruction::Mul:
434 return I.hasNoUnsignedWrap();
435 default:
436 return false;
437 }
438}
439
440// Return true if the op promoted to i32 should have nuw set.
441static bool promotedOpIsNUW(const Instruction &I) {
442 switch (I.getOpcode()) {
443 case Instruction::Shl:
444 case Instruction::Add:
445 case Instruction::Mul:
446 return true;
447 case Instruction::Sub:
448 return I.hasNoUnsignedWrap();
449 default:
450 return false;
451 }
452}
453
454bool AMDGPUCodeGenPrepareImpl::canWidenScalarExtLoad(LoadInst &I) const {
455 Type *Ty = I.getType();
456 const DataLayout &DL = Mod->getDataLayout();
457 int TySize = DL.getTypeSizeInBits(Ty);
458 Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);
459
460 return I.isSimple() && TySize < 32 && Alignment >= 4 && UA->isUniform(&I);
461}
462
463bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(BinaryOperator &I) const {
464 assert(needsPromotionToI32(I.getType()) &&
465 "I does not need promotion to i32");
466
467 if (I.getOpcode() == Instruction::SDiv ||
468 I.getOpcode() == Instruction::UDiv ||
469 I.getOpcode() == Instruction::SRem ||
470 I.getOpcode() == Instruction::URem)
471 return false;
472
473 IRBuilder<> Builder(&I);
474 Builder.SetCurrentDebugLocation(I.getDebugLoc());
475
476 Type *I32Ty = getI32Ty(Builder, I.getType());
477 Value *ExtOp0 = nullptr;
478 Value *ExtOp1 = nullptr;
479 Value *ExtRes = nullptr;
480 Value *TruncRes = nullptr;
481
482 if (isSigned(I)) {
483 ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
484 ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
485 } else {
486 ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
487 ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
488 }
489
490 ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
491 if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
492 if (promotedOpIsNSW(cast<Instruction>(I)))
493 Inst->setHasNoSignedWrap();
494
495 if (promotedOpIsNUW(cast<Instruction>(I)))
496 Inst->setHasNoUnsignedWrap();
497
498 if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
499 Inst->setIsExact(ExactOp->isExact());
500 }
501
502 TruncRes = Builder.CreateTrunc(ExtRes, I.getType());
503
504 I.replaceAllUsesWith(TruncRes);
505 I.eraseFromParent();
506
507 return true;
508}
509
510bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(ICmpInst &I) const {
511 assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
512 "I does not need promotion to i32");
513
514 IRBuilder<> Builder(&I);
515 Builder.SetCurrentDebugLocation(I.getDebugLoc());
516
517 Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
518 Value *ExtOp0 = nullptr;
519 Value *ExtOp1 = nullptr;
520 Value *NewICmp = nullptr;
521
522 if (I.isSigned()) {
523 ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
524 ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
525 } else {
526 ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
527 ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
528 }
529 NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);
530
531 I.replaceAllUsesWith(NewICmp);
532 I.eraseFromParent();
533
534 return true;
535}
536
537bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(SelectInst &I) const {
538 assert(needsPromotionToI32(I.getType()) &&
539 "I does not need promotion to i32");
540
541 IRBuilder<> Builder(&I);
542 Builder.SetCurrentDebugLocation(I.getDebugLoc());
543
544 Type *I32Ty = getI32Ty(Builder, I.getType());
545 Value *ExtOp1 = nullptr;
546 Value *ExtOp2 = nullptr;
547 Value *ExtRes = nullptr;
548 Value *TruncRes = nullptr;
549
550 if (isSigned(I)) {
551 ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
552 ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
553 } else {
554 ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
555 ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
556 }
557 ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
558 TruncRes = Builder.CreateTrunc(ExtRes, I.getType());
559
560 I.replaceAllUsesWith(TruncRes);
561 I.eraseFromParent();
562
563 return true;
564}
565
566bool AMDGPUCodeGenPrepareImpl::promoteUniformBitreverseToI32(
567 IntrinsicInst &I) const {
568 assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
569 "I must be bitreverse intrinsic");
570 assert(needsPromotionToI32(I.getType()) &&
571 "I does not need promotion to i32");
572
573 IRBuilder<> Builder(&I);
574 Builder.SetCurrentDebugLocation(I.getDebugLoc());
575
576 Type *I32Ty = getI32Ty(Builder, I.getType());
577 Function *I32 =
578 Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
579 Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
580 Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
581 Value *LShrOp =
582 Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
583 Value *TruncRes =
584 Builder.CreateTrunc(LShrOp, I.getType());
585
586 I.replaceAllUsesWith(TruncRes);
587 I.eraseFromParent();
588
589 return true;
590}
591
592unsigned AMDGPUCodeGenPrepareImpl::numBitsUnsigned(Value *Op) const {
593 return computeKnownBits(Op, *DL, 0, AC).countMaxActiveBits();
594}
595
596unsigned AMDGPUCodeGenPrepareImpl::numBitsSigned(Value *Op) const {
597 return ComputeMaxSignificantBits(Op, *DL, 0, AC);
598}
599
600static void extractValues(IRBuilder<> &Builder,
601 SmallVectorImpl<Value *> &Values, Value *V) {
602 auto *VT = dyn_cast<FixedVectorType>(V->getType());
603 if (!VT) {
604 Values.push_back(V);
605 return;
606 }
607
608 for (int I = 0, E = VT->getNumElements(); I != E; ++I)
609 Values.push_back(Builder.CreateExtractElement(V, I));
610}
611
612 static Value *insertValues(IRBuilder<> &Builder,
613 Type *Ty,
614 SmallVectorImpl<Value *> &Values) {
615 if (!Ty->isVectorTy()) {
616 assert(Values.size() == 1);
617 return Values[0];
618 }
619
620 Value *NewVal = PoisonValue::get(Ty);
621 for (int I = 0, E = Values.size(); I != E; ++I)
622 NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);
623
624 return NewVal;
625}
626
627bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
628 if (I.getOpcode() != Instruction::Mul)
629 return false;
630
631 Type *Ty = I.getType();
632 unsigned Size = Ty->getScalarSizeInBits();
633 if (Size <= 16 && ST->has16BitInsts())
634 return false;
635
636 // Prefer scalar if this could be s_mul_i32
637 if (UA->isUniform(&I))
638 return false;
639
640 Value *LHS = I.getOperand(0);
641 Value *RHS = I.getOperand(1);
642 IRBuilder<> Builder(&I);
643 Builder.SetCurrentDebugLocation(I.getDebugLoc());
644
645 unsigned LHSBits = 0, RHSBits = 0;
646 bool IsSigned = false;
647
648 if (ST->hasMulU24() && (LHSBits = numBitsUnsigned(LHS)) <= 24 &&
649 (RHSBits = numBitsUnsigned(RHS)) <= 24) {
650 IsSigned = false;
651
652 } else if (ST->hasMulI24() && (LHSBits = numBitsSigned(LHS)) <= 24 &&
653 (RHSBits = numBitsSigned(RHS)) <= 24) {
654 IsSigned = true;
655
656 } else
657 return false;
658
659 SmallVector<Value *, 4> LHSVals;
660 SmallVector<Value *, 4> RHSVals;
661 SmallVector<Value *, 4> ResultVals;
662 extractValues(Builder, LHSVals, LHS);
663 extractValues(Builder, RHSVals, RHS);
664
665 IntegerType *I32Ty = Builder.getInt32Ty();
666 IntegerType *IntrinTy = Size > 32 ? Builder.getInt64Ty() : I32Ty;
667 Type *DstTy = LHSVals[0]->getType();
668
669 for (int I = 0, E = LHSVals.size(); I != E; ++I) {
670 Value *LHS = IsSigned ? Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty)
671 : Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
672 Value *RHS = IsSigned ? Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty)
673 : Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
674 Intrinsic::ID ID =
675 IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
676 Value *Result = Builder.CreateIntrinsic(ID, {IntrinTy}, {LHS, RHS});
677 Result = IsSigned ? Builder.CreateSExtOrTrunc(Result, DstTy)
678 : Builder.CreateZExtOrTrunc(Result, DstTy);
679 ResultVals.push_back(Result);
680 }
681
682 Value *NewVal = insertValues(Builder, Ty, ResultVals);
683 NewVal->takeName(&I);
684 I.replaceAllUsesWith(NewVal);
685 I.eraseFromParent();
686
687 return true;
688}
689
690 // Find a select instruction, which may have been cast. This is mostly to deal
691 // with cases where i16 selects were promoted here to i32.
692 static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
693 Cast = nullptr;
694 if (SelectInst *Sel = dyn_cast<SelectInst>(V))
695 return Sel;
696
697 if ((Cast = dyn_cast<CastInst>(V))) {
698 if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
699 return Sel;
700 }
701
702 return nullptr;
703}
704
705bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
706 // Don't do this unless the old select is going away. We want to eliminate the
707 // binary operator, not replace a binop with a select.
708 int SelOpNo = 0;
709
710 CastInst *CastOp;
711
712 // TODO: Should probably try to handle some cases with multiple
713 // users. Duplicating the select may be profitable for division.
714 SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
715 if (!Sel || !Sel->hasOneUse()) {
716 SelOpNo = 1;
717 Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
718 }
719
720 if (!Sel || !Sel->hasOneUse())
721 return false;
722
723 Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
724 Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
725 Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
726 if (!CBO || !CT || !CF)
727 return false;
728
729 if (CastOp) {
730 if (!CastOp->hasOneUse())
731 return false;
732 CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
733 CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
734 }
735
736 // TODO: Handle special 0/-1 cases DAG combine does, although we only really
737 // need to handle divisions here.
738 Constant *FoldedT = SelOpNo ?
739 ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
740 ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
741 if (!FoldedT || isa<ConstantExpr>(FoldedT))
742 return false;
743
744 Constant *FoldedF = SelOpNo ?
745 ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
746 ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
747 if (!FoldedF || isa<ConstantExpr>(FoldedF))
748 return false;
749
750 IRBuilder<> Builder(&BO);
751 Builder.SetCurrentDebugLocation(BO.getDebugLoc());
752 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
753 Builder.setFastMathFlags(FPOp->getFastMathFlags());
754
755 Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
756 FoldedT, FoldedF);
757 NewSelect->takeName(&BO);
758 BO.replaceAllUsesWith(NewSelect);
759 BO.eraseFromParent();
760 if (CastOp)
761 CastOp->eraseFromParent();
762 Sel->eraseFromParent();
763 return true;
764}
765
766std::pair<Value *, Value *>
767AMDGPUCodeGenPrepareImpl::getFrexpResults(IRBuilder<> &Builder,
768 Value *Src) const {
769 Type *Ty = Src->getType();
770 Value *Frexp = Builder.CreateIntrinsic(Intrinsic::frexp,
771 {Ty, Builder.getInt32Ty()}, Src);
772 Value *FrexpMant = Builder.CreateExtractValue(Frexp, {0});
773
774 // Bypass the bug workaround for the exponent result since it doesn't matter.
775 // TODO: Does the bug workaround even really need to consider the exponent
776 // result? It's unspecified by the spec.
777
778 Value *FrexpExp =
779 ST->hasFractBug()
780 ? Builder.CreateIntrinsic(Intrinsic::amdgcn_frexp_exp,
781 {Builder.getInt32Ty(), Ty}, Src)
782 : Builder.CreateExtractValue(Frexp, {1});
783 return {FrexpMant, FrexpExp};
784}
785
786/// Emit an expansion of 1.0 / Src good for 1ulp that supports denormals.
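/// Sketch of the scaling, with frexp(Src) = m * 2^e and 0.5 <= |m| < 1:
/// rcp(Src) is computed as ldexp(rcp(m), -e). E.g. for a denormal input
/// Src = 0x1p-130, frexp gives m = 0.5 and e = -129, so the result is
/// ldexp(2.0, 129) = 0x1p+130.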
787Value *AMDGPUCodeGenPrepareImpl::emitRcpIEEE1ULP(IRBuilder<> &Builder,
788 Value *Src,
789 bool IsNegative) const {
790 // Same as for 1.0, but expand the sign out of the constant.
791 // -1.0 / x -> rcp (fneg x)
792 if (IsNegative)
793 Src = Builder.CreateFNeg(Src);
794
795 // The rcp instruction doesn't support denormals, so scale the input
796 // out of the denormal range and convert at the end.
797 //
798 // Expand as 2^-n * (1.0 / (x * 2^n))
799
800 // TODO: Skip scaling if input is known never denormal and the input
801 // range won't underflow to denormal. The hard part is knowing the
802 // result. We need a range check, the result could be denormal for
803 // 0x1p+126 < den <= 0x1p+127.
804 auto [FrexpMant, FrexpExp] = getFrexpResults(Builder, Src);
805 Value *ScaleFactor = Builder.CreateNeg(FrexpExp);
806 Value *Rcp = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMant);
807 return Builder.CreateCall(getLdexpF32(), {Rcp, ScaleFactor});
808}
809
810/// Emit a 2ulp expansion for fdiv by using frexp for input scaling.
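/// The identity used, assuming frexp gives LHS = ma * 2^ea and RHS = mb * 2^eb:
/// LHS / RHS = (ma * rcp(mb)) * 2^(ea - eb), reconstructed by the ldexp call.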
811Value *AMDGPUCodeGenPrepareImpl::emitFrexpDiv(IRBuilder<> &Builder, Value *LHS,
812 Value *RHS,
813 FastMathFlags FMF) const {
814 // If we have to work around the fract/frexp bug, we're worse off than
815 // using the fdiv.fast expansion. The full safe expansion is faster if we have
816 // fast FMA.
817 if (HasFP32DenormalFlush && ST->hasFractBug() && !ST->hasFastFMAF32() &&
818 (!FMF.noNaNs() || !FMF.noInfs()))
819 return nullptr;
820
821 // We're scaling the LHS to avoid a denormal input, and scale the denominator
822 // to avoid large values underflowing the result.
823 auto [FrexpMantRHS, FrexpExpRHS] = getFrexpResults(Builder, RHS);
824
825 Value *Rcp =
826 Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMantRHS);
827
828 auto [FrexpMantLHS, FrexpExpLHS] = getFrexpResults(Builder, LHS);
829 Value *Mul = Builder.CreateFMul(FrexpMantLHS, Rcp);
830
831 // We multiplied by 2^N/2^M, so we need to multiply by 2^(N-M) to scale the
832 // result.
833 Value *ExpDiff = Builder.CreateSub(FrexpExpLHS, FrexpExpRHS);
834 return Builder.CreateCall(getLdexpF32(), {Mul, ExpDiff});
835}
836
837/// Emit a sqrt that handles denormals and is accurate to 2ulp.
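/// The scaling used: if Src < 0x1p-126 (the smallest normal f32), compute
/// sqrt(Src * 2^32) * 2^-16, which equals sqrt(Src) exactly because 2^32 is an
/// even power of two. E.g. for Src = 0x1p-140: sqrt(0x1p-108) = 0x1p-54 and
/// ldexp(0x1p-54, -16) = 0x1p-70 = sqrt(0x1p-140).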
838Value *AMDGPUCodeGenPrepareImpl::emitSqrtIEEE2ULP(IRBuilder<> &Builder,
839 Value *Src,
840 FastMathFlags FMF) const {
841 Type *Ty = Src->getType();
842 APFloat SmallestNormal =
843 APFloat::getSmallestNormalized(Ty->getFltSemantics());
844 Value *NeedScale =
845 Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));
846
847 ConstantInt *Zero = Builder.getInt32(0);
848 Value *InputScaleFactor =
849 Builder.CreateSelect(NeedScale, Builder.getInt32(32), Zero);
850
851 Value *Scaled = Builder.CreateCall(getLdexpF32(), {Src, InputScaleFactor});
852
853 Value *Sqrt = Builder.CreateCall(getSqrtF32(), Scaled);
854
855 Value *OutputScaleFactor =
856 Builder.CreateSelect(NeedScale, Builder.getInt32(-16), Zero);
857 return Builder.CreateCall(getLdexpF32(), {Sqrt, OutputScaleFactor});
858}
859
860/// Emit an expansion of 1.0 / sqrt(Src) good for 1ulp that supports denormals.
861static Value *emitRsqIEEE1ULP(IRBuilder<> &Builder, Value *Src,
862 bool IsNegative) {
863 // bool need_scale = x < 0x1p-126f;
864 // float input_scale = need_scale ? 0x1.0p+24f : 1.0f;
865 // float output_scale = need_scale ? 0x1.0p+12f : 1.0f;
866 // rsq(x * input_scale) * output_scale;
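  // E.g. for x = 0x1p-140: rsq(0x1p-140 * 0x1.0p+24) = 0x1p+58, and
  // 0x1p+58 * 0x1.0p+12 = 0x1p+70 = 1.0 / sqrt(0x1p-140).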
867
868 Type *Ty = Src->getType();
869 APFloat SmallestNormal =
870 APFloat::getSmallestNormalized(Ty->getFltSemantics());
871 Value *NeedScale =
872 Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));
873 Constant *One = ConstantFP::get(Ty, 1.0);
874 Constant *InputScale = ConstantFP::get(Ty, 0x1.0p+24);
875 Constant *OutputScale =
876 ConstantFP::get(Ty, IsNegative ? -0x1.0p+12 : 0x1.0p+12);
877
878 Value *InputScaleFactor = Builder.CreateSelect(NeedScale, InputScale, One);
879
880 Value *ScaledInput = Builder.CreateFMul(Src, InputScaleFactor);
881 Value *Rsq = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, ScaledInput);
882 Value *OutputScaleFactor = Builder.CreateSelect(
883 NeedScale, OutputScale, IsNegative ? ConstantFP::get(Ty, -1.0) : One);
884
885 return Builder.CreateFMul(Rsq, OutputScaleFactor);
886}
887
888bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(const FPMathOperator *SqrtOp,
889 FastMathFlags DivFMF,
890 FastMathFlags SqrtFMF) const {
891 // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
892 if (!DivFMF.allowContract() || !SqrtFMF.allowContract())
893 return false;
894
895 // v_rsq_f32 gives 1ulp
896 return SqrtFMF.approxFunc() || HasUnsafeFPMath ||
897 SqrtOp->getFPAccuracy() >= 1.0f;
898}
899
900Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
901 IRBuilder<> &Builder, Value *Num, Value *Den, const FastMathFlags DivFMF,
902 const FastMathFlags SqrtFMF, const Instruction *CtxI) const {
903 // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
904 assert(DivFMF.allowContract() && SqrtFMF.allowContract());
905
906 // rsq_f16 is accurate to 0.51 ulp.
907 // rsq_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
908 // rsq_f64 is never accurate.
909 const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num);
910 if (!CLHS)
911 return nullptr;
912
913 assert(Den->getType()->isFloatTy());
914
915 bool IsNegative = false;
916
917 // TODO: Handle other numerator values with arcp.
918 if (CLHS->isExactlyValue(1.0) || (IsNegative = CLHS->isExactlyValue(-1.0))) {
919 // Add in the sqrt flags.
920 IRBuilder<>::FastMathFlagGuard Guard(Builder);
921 Builder.setFastMathFlags(DivFMF | SqrtFMF);
922
923 if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) || HasUnsafeFPMath ||
924 canIgnoreDenormalInput(Den, CtxI)) {
925 Value *Result = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
926 // -1.0 / sqrt(x) -> fneg(rsq(x))
927 return IsNegative ? Builder.CreateFNeg(Result) : Result;
928 }
929
930 return emitRsqIEEE1ULP(Builder, Den, IsNegative);
931 }
932
933 return nullptr;
934}
935
936// Optimize fdiv with rcp:
937//
938// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
939// allowed with unsafe-fp-math or afn.
940//
941 // a/b -> a*rcp(b) when arcp is allowed, and we only need to provide 1.0 ULP.
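// For example, with these rules 1.0 / x becomes a call to the amdgcn.rcp
// intrinsic (with a negated input for -1.0 / x), and under arcp a / x becomes
// a * rcp(x); when f32 denormals are not flushed, the rcp is wrapped in the
// frexp/ldexp scaling emitted by emitRcpIEEE1ULP above.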
942Value *
943AMDGPUCodeGenPrepareImpl::optimizeWithRcp(IRBuilder<> &Builder, Value *Num,
944 Value *Den, FastMathFlags FMF,
945 const Instruction *CtxI) const {
946 // rcp_f16 is accurate to 0.51 ulp.
947 // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
948 // rcp_f64 is never accurate.
949 assert(Den->getType()->isFloatTy());
950
951 if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
952 bool IsNegative = false;
953 if (CLHS->isExactlyValue(1.0) ||
954 (IsNegative = CLHS->isExactlyValue(-1.0))) {
955 Value *Src = Den;
956
957 if (HasFP32DenormalFlush || FMF.approxFunc()) {
958 // -1.0 / x -> 1.0 / fneg(x)
959 if (IsNegative)
960 Src = Builder.CreateFNeg(Src);
961
962 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
963 // the CI documentation has a worst case error of 1 ulp.
964 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK
965 // to use it as long as we aren't trying to use denormals.
966 //
967 // v_rcp_f16 and v_rsq_f16 DO support denormals.
968
969 // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
970 // insert rsq intrinsic here.
971
972 // 1.0 / x -> rcp(x)
973 return Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Src);
974 }
975
976 // TODO: If the input isn't denormal, and we know the input exponent isn't
977 // big enough to introduce a denormal we can avoid the scaling.
978 return emitRcpIEEE1ULP(Builder, Src, IsNegative);
979 }
980 }
981
982 if (FMF.allowReciprocal()) {
983 // x / y -> x * (1.0 / y)
984
985 // TODO: Could avoid denormal scaling and use raw rcp if we knew the output
986 // will never underflow.
987 if (HasFP32DenormalFlush || FMF.approxFunc()) {
988 Value *Recip = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Den);
989 return Builder.CreateFMul(Num, Recip);
990 }
991
992 Value *Recip = emitRcpIEEE1ULP(Builder, Den, false);
993 return Builder.CreateFMul(Num, Recip);
994 }
995
996 return nullptr;
997}
998
999// optimize with fdiv.fast:
1000//
1001// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
1002//
1003// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
1004//
1005// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
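// Illustrative example (hypothetical IR): with f32 denormals flushed,
//   %r = fdiv float %a, %b, !fpmath !0    ; !0 = !{float 2.5}
// can be lowered to
//   %r = call float @llvm.amdgcn.fdiv.fast(float %a, float %b)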
1006Value *AMDGPUCodeGenPrepareImpl::optimizeWithFDivFast(
1007 IRBuilder<> &Builder, Value *Num, Value *Den, float ReqdAccuracy) const {
1008 // fdiv.fast can achieve 2.5 ULP accuracy.
1009 if (ReqdAccuracy < 2.5f)
1010 return nullptr;
1011
1012 // Only have fdiv.fast for f32.
1013 assert(Den->getType()->isFloatTy());
1014
1015 bool NumIsOne = false;
1016 if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
1017 if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
1018 NumIsOne = true;
1019 }
1020
1021 // fdiv.fast does not support denormals, but it is always fine to use for 1.0/x.
1022 //
1023 // TODO: This works for any value with a specific known exponent range, don't
1024 // just limit to constant 1.
1025 if (!HasFP32DenormalFlush && !NumIsOne)
1026 return nullptr;
1027
1028 return Builder.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {}, {Num, Den});
1029}
1030
1031Value *AMDGPUCodeGenPrepareImpl::visitFDivElement(
1032 IRBuilder<> &Builder, Value *Num, Value *Den, FastMathFlags DivFMF,
1033 FastMathFlags SqrtFMF, Value *RsqOp, const Instruction *FDivInst,
1034 float ReqdDivAccuracy) const {
1035 if (RsqOp) {
1036 Value *Rsq =
1037 optimizeWithRsq(Builder, Num, RsqOp, DivFMF, SqrtFMF, FDivInst);
1038 if (Rsq)
1039 return Rsq;
1040 }
1041
1042 Value *Rcp = optimizeWithRcp(Builder, Num, Den, DivFMF, FDivInst);
1043 if (Rcp)
1044 return Rcp;
1045
1046 // In the basic case fdiv_fast has the same instruction count as the frexp div
1047 // expansion. Slightly prefer fdiv_fast since it ends in an fmul that can
1048 // potentially be fused into a user. Also, materialization of the constants
1049 // can be reused for multiple instances.
1050 Value *FDivFast = optimizeWithFDivFast(Builder, Num, Den, ReqdDivAccuracy);
1051 if (FDivFast)
1052 return FDivFast;
1053
1054 return emitFrexpDiv(Builder, Num, Den, DivFMF);
1055}
1056
1057 // Optimization is performed based on fpmath, fast-math flags, and the denormal
1058 // mode to lower fdiv with either rcp or fdiv.fast.
1059//
1060// With rcp:
1061// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
1062// allowed with unsafe-fp-math or afn.
1063//
1064// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
1065//
1066// With fdiv.fast:
1067// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
1068//
1069// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
1070//
1071// NOTE: rcp is the preference in cases that both are legal.
1072bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
1073 if (DisableFDivExpand)
1074 return false;
1075
1076 Type *Ty = FDiv.getType()->getScalarType();
1077 if (!Ty->isFloatTy())
1078 return false;
1079
1080 // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
1081 // expansion around them in codegen. f16 is good enough to always use.
1082
1083 const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
1084 const FastMathFlags DivFMF = FPOp->getFastMathFlags();
1085 const float ReqdAccuracy = FPOp->getFPAccuracy();
1086
1087 FastMathFlags SqrtFMF;
1088
1089 Value *Num = FDiv.getOperand(0);
1090 Value *Den = FDiv.getOperand(1);
1091
1092 Value *RsqOp = nullptr;
1093 auto *DenII = dyn_cast<IntrinsicInst>(Den);
1094 if (DenII && DenII->getIntrinsicID() == Intrinsic::sqrt &&
1095 DenII->hasOneUse()) {
1096 const auto *SqrtOp = cast<FPMathOperator>(DenII);
1097 SqrtFMF = SqrtOp->getFastMathFlags();
1098 if (canOptimizeWithRsq(SqrtOp, DivFMF, SqrtFMF))
1099 RsqOp = SqrtOp->getOperand(0);
1100 }
1101
1102 // Inaccurate rcp is allowed with unsafe-fp-math or afn.
1103 //
1104 // Defer to codegen to handle this.
1105 //
1106 // TODO: Decide on an interpretation for interactions between afn + arcp +
1107 // !fpmath, and make it consistent between here and codegen. For now, defer
1108 // expansion of afn to codegen. The current interpretation is so aggressive we
1109 // don't need any pre-consideration here when we have better information. A
1110 // more conservative interpretation could use handling here.
1111 const bool AllowInaccurateRcp = HasUnsafeFPMath || DivFMF.approxFunc();
1112 if (!RsqOp && AllowInaccurateRcp)
1113 return false;
1114
1115 // Defer the correct implementations to codegen.
1116 if (ReqdAccuracy < 1.0f)
1117 return false;
1118
1119 IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
1120 Builder.setFastMathFlags(DivFMF);
1121 Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());
1122
1123 SmallVector<Value *, 4> NumVals;
1124 SmallVector<Value *, 4> DenVals;
1125 SmallVector<Value *, 4> RsqDenVals;
1126 extractValues(Builder, NumVals, Num);
1127 extractValues(Builder, DenVals, Den);
1128
1129 if (RsqOp)
1130 extractValues(Builder, RsqDenVals, RsqOp);
1131
1132 SmallVector<Value *, 4> ResultVals(NumVals.size());
1133 for (int I = 0, E = NumVals.size(); I != E; ++I) {
1134 Value *NumElt = NumVals[I];
1135 Value *DenElt = DenVals[I];
1136 Value *RsqDenElt = RsqOp ? RsqDenVals[I] : nullptr;
1137
1138 Value *NewElt =
1139 visitFDivElement(Builder, NumElt, DenElt, DivFMF, SqrtFMF, RsqDenElt,
1140 cast<Instruction>(FPOp), ReqdAccuracy);
1141 if (!NewElt) {
1142 // Keep the original, but scalarized.
1143
1144 // This has the unfortunate side effect of sometimes scalarizing when
1145 // we're not going to do anything.
1146 NewElt = Builder.CreateFDiv(NumElt, DenElt);
1147 if (auto *NewEltInst = dyn_cast<Instruction>(NewElt))
1148 NewEltInst->copyMetadata(FDiv);
1149 }
1150
1151 ResultVals[I] = NewElt;
1152 }
1153
1154 Value *NewVal = insertValues(Builder, FDiv.getType(), ResultVals);
1155
1156 if (NewVal) {
1157 FDiv.replaceAllUsesWith(NewVal);
1158 NewVal->takeName(&FDiv);
1160 }
1161
1162 return true;
1163}
1164
1165static bool hasUnsafeFPMath(const Function &F) {
1166 Attribute Attr = F.getFnAttribute("unsafe-fp-math");
1167 return Attr.getValueAsBool();
1168}
1169
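// Multiply two i32 values in i64 and return the {low, high} 32-bit halves of
// the product. E.g. 0xFFFFFFFF * 0xFFFFFFFF = 0xFFFFFFFE00000001, giving
// Lo = 0x00000001 and Hi = 0xFFFFFFFE.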
1170static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
1171 Value *LHS, Value *RHS) {
1172 Type *I32Ty = Builder.getInt32Ty();
1173 Type *I64Ty = Builder.getInt64Ty();
1174
1175 Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
1176 Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
1177 Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
1178 Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
1179 Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
1180 Hi = Builder.CreateTrunc(Hi, I32Ty);
1181 return std::pair(Lo, Hi);
1182}
1183
1184static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
1185 return getMul64(Builder, LHS, RHS).second;
1186}
1187
1188/// Figure out how many bits are really needed for this division. \p AtLeast is
1189 /// an optimization hint to bypass the second ComputeNumSignBits call if the
1190/// first one is insufficient. Returns -1 on failure.
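/// For example, for a signed i64 division where both operands are known to
/// have at least 41 sign bits, SignBits = 41 and DivBits = 64 - 41 + 1 = 24,
/// so the 24-bit expansion can be used.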
1191int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
1192 Value *Den, unsigned AtLeast,
1193 bool IsSigned) const {
1194 const DataLayout &DL = Mod->getDataLayout();
1195 unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
1196 if (LHSSignBits < AtLeast)
1197 return -1;
1198
1199 unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
1200 if (RHSSignBits < AtLeast)
1201 return -1;
1202
1203 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1204 unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
1205 if (IsSigned)
1206 ++DivBits;
1207 return DivBits;
1208}
1209
1210// The fractional part of a float is enough to accurately represent up to
1211// a 24-bit signed integer.
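// Worked example (unsigned, ignoring the final width fixup): for 100 / 7,
// fa = 100.0, fb = 7.0, fqm = 100.0 * rcp(7.0) ~= 14.29, fq = 14, iq = 14;
// fr = |mad(-fq, fb, fa)| = 2.0 < fb, so the +1 correction (jq) is dropped
// and the quotient is 14.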
1212Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
1213 BinaryOperator &I, Value *Num,
1214 Value *Den, bool IsDiv,
1215 bool IsSigned) const {
1216 int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
1217 if (DivBits == -1)
1218 return nullptr;
1219 return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
1220}
1221
1222Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
1223 IRBuilder<> &Builder, BinaryOperator &I, Value *Num, Value *Den,
1224 unsigned DivBits, bool IsDiv, bool IsSigned) const {
1225 Type *I32Ty = Builder.getInt32Ty();
1226 Num = Builder.CreateTrunc(Num, I32Ty);
1227 Den = Builder.CreateTrunc(Den, I32Ty);
1228
1229 Type *F32Ty = Builder.getFloatTy();
1230 ConstantInt *One = Builder.getInt32(1);
1231 Value *JQ = One;
1232
1233 if (IsSigned) {
1234 // char|short jq = ia ^ ib;
1235 JQ = Builder.CreateXor(Num, Den);
1236
1237 // jq = jq >> (bitsize - 2)
1238 JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));
1239
1240 // jq = jq | 0x1
1241 JQ = Builder.CreateOr(JQ, One);
1242 }
1243
1244 // int ia = (int)LHS;
1245 Value *IA = Num;
1246
1247 // int ib = (int)RHS;
1248 Value *IB = Den;
1249
1250 // float fa = (float)ia;
1251 Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
1252 : Builder.CreateUIToFP(IA, F32Ty);
1253
1254 // float fb = (float)ib;
1255 Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
1256 : Builder.CreateUIToFP(IB,F32Ty);
1257
1258 Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
1259 Builder.getFloatTy());
1260 Value *RCP = Builder.CreateCall(RcpDecl, { FB });
1261 Value *FQM = Builder.CreateFMul(FA, RCP);
1262
1263 // fq = trunc(fqm);
1264 CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
1265 FQ->copyFastMathFlags(Builder.getFastMathFlags());
1266
1267 // float fqneg = -fq;
1268 Value *FQNeg = Builder.CreateFNeg(FQ);
1269
1270 // float fr = mad(fqneg, fb, fa);
1271 auto FMAD = !ST->hasMadMacF32Insts()
1272 ? Intrinsic::fma
1273 : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
1274 Value *FR = Builder.CreateIntrinsic(FMAD,
1275 {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
1276
1277 // int iq = (int)fq;
1278 Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
1279 : Builder.CreateFPToUI(FQ, I32Ty);
1280
1281 // fr = fabs(fr);
1282 FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);
1283
1284 // fb = fabs(fb);
1285 FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);
1286
1287 // int cv = fr >= fb;
1288 Value *CV = Builder.CreateFCmpOGE(FR, FB);
1289
1290 // jq = (cv ? jq : 0);
1291 JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));
1292
1293 // dst = iq + jq;
1294 Value *Div = Builder.CreateAdd(IQ, JQ);
1295
1296 Value *Res = Div;
1297 if (!IsDiv) {
1298 // Rem needs compensation, it's easier to recompute it
1299 Value *Rem = Builder.CreateMul(Div, Den);
1300 Res = Builder.CreateSub(Num, Rem);
1301 }
1302
1303 if (DivBits != 0 && DivBits < 32) {
1304 // Extend in register from the number of bits this divide really is.
1305 if (IsSigned) {
1306 int InRegBits = 32 - DivBits;
1307
1308 Res = Builder.CreateShl(Res, InRegBits);
1309 Res = Builder.CreateAShr(Res, InRegBits);
1310 } else {
1311 ConstantInt *TruncMask
1312 = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
1313 Res = Builder.CreateAnd(Res, TruncMask);
1314 }
1315 }
1316
1317 return Res;
1318}
1319
1320 // Try to recognize special cases for which the DAG will emit better expansions
1321 // than the general expansion we do here.
1322
1323// TODO: It would be better to just directly handle those optimizations here.
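// For example, 'udiv i32 %x, 16' is left alone here: the DAG turns it into a
// single shift, and any divide by a constant that is 32 bits or narrower gets
// a mulhi-based expansion that beats the generic one below.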
1324bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
1325 Value *Num,
1326 Value *Den) const {
1327 if (Constant *C = dyn_cast<Constant>(Den)) {
1328 // Arbitrary constants get a better expansion as long as a wider mulhi is
1329 // legal.
1330 if (C->getType()->getScalarSizeInBits() <= 32)
1331 return true;
1332
1333 // TODO: Sdiv check for not exact for some reason.
1334
1335 // If there's no wider mulhi, there's only a better expansion for powers of
1336 // two.
1337 // TODO: Should really know for each vector element.
1338 if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
1339 return true;
1340
1341 return false;
1342 }
1343
1344 if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
1345 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
1346 if (BinOpDen->getOpcode() == Instruction::Shl &&
1347 isa<Constant>(BinOpDen->getOperand(0)) &&
1348 isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
1349 0, AC, &I, DT)) {
1350 return true;
1351 }
1352 }
1353
1354 return false;
1355}
1356
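// Return a value that is all-ones if V is negative and zero otherwise,
// resolving to a constant when the sign is statically known. E.g. for
// V = 0x80000000 (i32), ashr by 31 yields 0xFFFFFFFF (-1).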
1357static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
1358 // Check whether the sign can be determined statically.
1359 KnownBits Known = computeKnownBits(V, *DL);
1360 if (Known.isNegative())
1361 return Constant::getAllOnesValue(V->getType());
1362 if (Known.isNonNegative())
1363 return Constant::getNullValue(V->getType());
1364 return Builder.CreateAShr(V, Builder.getInt32(31));
1365}
1366
1367Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
1368 BinaryOperator &I, Value *X,
1369 Value *Y) const {
1370 Instruction::BinaryOps Opc = I.getOpcode();
1371 assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
1372 Opc == Instruction::SRem || Opc == Instruction::SDiv);
1373
1374 FastMathFlags FMF;
1375 FMF.setFast();
1376 Builder.setFastMathFlags(FMF);
1377
1378 if (divHasSpecialOptimization(I, X, Y))
1379 return nullptr; // Keep it for later optimization.
1380
1381 bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
1382 bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
1383
1384 Type *Ty = X->getType();
1385 Type *I32Ty = Builder.getInt32Ty();
1386 Type *F32Ty = Builder.getFloatTy();
1387
1388 if (Ty->getScalarSizeInBits() < 32) {
1389 if (IsSigned) {
1390 X = Builder.CreateSExt(X, I32Ty);
1391 Y = Builder.CreateSExt(Y, I32Ty);
1392 } else {
1393 X = Builder.CreateZExt(X, I32Ty);
1394 Y = Builder.CreateZExt(Y, I32Ty);
1395 }
1396 }
1397
1398 if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
1399 return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
1400 Builder.CreateZExtOrTrunc(Res, Ty);
1401 }
1402
1403 ConstantInt *Zero = Builder.getInt32(0);
1404 ConstantInt *One = Builder.getInt32(1);
1405
1406 Value *Sign = nullptr;
1407 if (IsSigned) {
1408 Value *SignX = getSign32(X, Builder, DL);
1409 Value *SignY = getSign32(Y, Builder, DL);
1410 // Remainder sign is the same as LHS
1411 Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;
1412
1413 X = Builder.CreateAdd(X, SignX);
1414 Y = Builder.CreateAdd(Y, SignY);
1415
1416 X = Builder.CreateXor(X, SignX);
1417 Y = Builder.CreateXor(Y, SignY);
1418 }
1419
1420 // The algorithm here is based on ideas from "Software Integer Division", Tom
1421 // Rodeheffer, August 2008.
1422 //
1423 // unsigned udiv(unsigned x, unsigned y) {
1424 // // Initial estimate of inv(y). The constant is less than 2^32 to ensure
1425 // // that this is a lower bound on inv(y), even if some of the calculations
1426 // // round up.
1427 // unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
1428 //
1429 // // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
1430 // // Empirically this is guaranteed to give a "two-y" lower bound on
1431 // // inv(y).
1432 // z += umulh(z, -y * z);
1433 //
1434 // // Quotient/remainder estimate.
1435 // unsigned q = umulh(x, z);
1436 // unsigned r = x - q * y;
1437 //
1438 // // Two rounds of quotient/remainder refinement.
1439 // if (r >= y) {
1440 // ++q;
1441 // r -= y;
1442 // }
1443 // if (r >= y) {
1444 // ++q;
1445 // r -= y;
1446 // }
1447 //
1448 // return q;
1449 // }
1450
1451 // Initial estimate of inv(y).
1452 Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
1453 Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
1454 Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
1455 Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
1456 Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
1457 Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);
1458
1459 // One round of UNR.
1460 Value *NegY = Builder.CreateSub(Zero, Y);
1461 Value *NegYZ = Builder.CreateMul(NegY, Z);
1462 Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));
1463
1464 // Quotient/remainder estimate.
1465 Value *Q = getMulHu(Builder, X, Z);
1466 Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));
1467
1468 // First quotient/remainder refinement.
1469 Value *Cond = Builder.CreateICmpUGE(R, Y);
1470 if (IsDiv)
1471 Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1472 R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1473
1474 // Second quotient/remainder refinement.
1475 Cond = Builder.CreateICmpUGE(R, Y);
1476 Value *Res;
1477 if (IsDiv)
1478 Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
1479 else
1480 Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);
1481
1482 if (IsSigned) {
1483 Res = Builder.CreateXor(Res, Sign);
1484 Res = Builder.CreateSub(Res, Sign);
1485 }
1486
1487 Res = Builder.CreateTrunc(Res, Ty);
1488
1489 return Res;
1490}
1491
1492Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
1493 BinaryOperator &I, Value *Num,
1494 Value *Den) const {
1495 if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
1496 return nullptr; // Keep it for later optimization.
1497
1498 Instruction::BinaryOps Opc = I.getOpcode();
1499
1500 bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
1501 bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
1502
1503 int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
1504 if (NumDivBits == -1)
1505 return nullptr;
1506
1507 Value *Narrowed = nullptr;
1508 if (NumDivBits <= 24) {
1509 Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
1510 IsDiv, IsSigned);
1511 } else if (NumDivBits <= 32) {
1512 Narrowed = expandDivRem32(Builder, I, Num, Den);
1513 }
1514
1515 if (Narrowed) {
1516 return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
1517 Builder.CreateZExt(Narrowed, Num->getType());
1518 }
1519
1520 return nullptr;
1521}
1522
1523void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
1524 Instruction::BinaryOps Opc = I.getOpcode();
1525 // Do the general expansion.
1526 if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
1527 expandDivisionUpTo64Bits(&I);
1528 return;
1529 }
1530
1531 if (Opc == Instruction::URem || Opc == Instruction::SRem) {
1532 expandRemainderUpTo64Bits(&I);
1533 return;
1534 }
1535
1536 llvm_unreachable("not a division");
1537}
1538
1539bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
1540 if (foldBinOpIntoSelect(I))
1541 return true;
1542
1543 if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
1544 UA->isUniform(&I) && promoteUniformOpToI32(I))
1545 return true;
1546
1547 if (UseMul24Intrin && replaceMulWithMul24(I))
1548 return true;
1549
1550 bool Changed = false;
1551 Instruction::BinaryOps Opc = I.getOpcode();
1552 Type *Ty = I.getType();
1553 Value *NewDiv = nullptr;
1554 unsigned ScalarSize = Ty->getScalarSizeInBits();
1555
1556 SmallVector<BinaryOperator *, 8> Div64ToExpand;
1557
1558 if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
1559 Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
1560 ScalarSize <= 64 &&
1561 !DisableIDivExpand) {
1562 Value *Num = I.getOperand(0);
1563 Value *Den = I.getOperand(1);
1564 IRBuilder<> Builder(&I);
1565 Builder.SetCurrentDebugLocation(I.getDebugLoc());
1566
1567 if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
1568 NewDiv = PoisonValue::get(VT);
1569
1570 for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
1571 Value *NumEltN = Builder.CreateExtractElement(Num, N);
1572 Value *DenEltN = Builder.CreateExtractElement(Den, N);
1573
1574 Value *NewElt;
1575 if (ScalarSize <= 32) {
1576 NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
1577 if (!NewElt)
1578 NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1579 } else {
1580 // See if this 64-bit division can be shrunk to 32/24-bits before
1581 // producing the general expansion.
1582 NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
1583 if (!NewElt) {
1584 // The general 64-bit expansion introduces control flow and doesn't
1585 // return the new value. Just insert a scalar copy and defer
1586 // expanding it.
1587 NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
1588 Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
1589 }
1590 }
1591
1592 NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
1593 }
1594 } else {
1595 if (ScalarSize <= 32)
1596 NewDiv = expandDivRem32(Builder, I, Num, Den);
1597 else {
1598 NewDiv = shrinkDivRem64(Builder, I, Num, Den);
1599 if (!NewDiv)
1600 Div64ToExpand.push_back(&I);
1601 }
1602 }
1603
1604 if (NewDiv) {
1605 I.replaceAllUsesWith(NewDiv);
1606 I.eraseFromParent();
1607 Changed = true;
1608 }
1609 }
1610
1611 if (ExpandDiv64InIR) {
1612 // TODO: We get much worse code in specially handled constant cases.
1613 for (BinaryOperator *Div : Div64ToExpand) {
1614 expandDivRem64(*Div);
1615 FlowChanged = true;
1616 Changed = true;
1617 }
1618 }
1619
1620 return Changed;
1621}
1622
1623bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
1624 if (!WidenLoads)
1625 return false;
1626
1627 if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
1628 I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
1629 canWidenScalarExtLoad(I)) {
1630 IRBuilder<> Builder(&I);
1631 Builder.SetCurrentDebugLocation(I.getDebugLoc());
1632
1633 Type *I32Ty = Builder.getInt32Ty();
1634 LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, I.getPointerOperand());
1635 WidenLoad->copyMetadata(I);
1636
1637 // If we have range metadata, we need to convert the type, and not make
1638 // assumptions about the high bits.
1639 if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
1640 ConstantInt *Lower =
1641 mdconst::extract<ConstantInt>(Range->getOperand(0));
1642
1643 if (Lower->isNullValue()) {
1644 WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
1645 } else {
1646 Metadata *LowAndHigh[] = {
1647 ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
1648 // Don't make assumptions about the high bits.
1649 ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0)),
1650 };
1651
1652 WidenLoad->setMetadata(LLVMContext::MD_range,
1653 MDNode::get(Mod->getContext(), LowAndHigh));
1654 }
1655 }
1656
1657 int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
1658 Type *IntNTy = Builder.getIntNTy(TySize);
1659 Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
1660 Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
1661 I.replaceAllUsesWith(ValOrig);
1662 I.eraseFromParent();
1663 return true;
1664 }
1665
1666 return false;
1667}
1668
1669bool AMDGPUCodeGenPrepareImpl::visitICmpInst(ICmpInst &I) {
1670 bool Changed = false;
1671
1672 if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
1673 UA->isUniform(&I))
1674 Changed |= promoteUniformOpToI32(I);
1675
1676 return Changed;
1677}
1678
1679bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
1680 Value *Cond = I.getCondition();
1681 Value *TrueVal = I.getTrueValue();
1682 Value *FalseVal = I.getFalseValue();
1683 Value *CmpVal;
1684 FCmpInst::Predicate Pred;
1685
1686 if (ST->has16BitInsts() && needsPromotionToI32(I.getType())) {
1687 if (UA->isUniform(&I))
1688 return promoteUniformOpToI32(I);
1689 return false;
1690 }
1691
1692 // Match fract pattern with nan check.
1693 if (!match(Cond, m_FCmp(Pred, m_Value(CmpVal), m_NonNaN())))
1694 return false;
1695
1696 FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&I);
1697 if (!FPOp)
1698 return false;
1699
1700 IRBuilder<> Builder(&I);
1701 Builder.setFastMathFlags(FPOp->getFastMathFlags());
1702
1703 auto *IITrue = dyn_cast<IntrinsicInst>(TrueVal);
1704 auto *IIFalse = dyn_cast<IntrinsicInst>(FalseVal);
1705
1706 Value *Fract = nullptr;
1707 if (Pred == FCmpInst::FCMP_UNO && TrueVal == CmpVal && IIFalse &&
1708 CmpVal == matchFractPat(*IIFalse)) {
1709 // isnan(x) ? x : fract(x)
1710 Fract = applyFractPat(Builder, CmpVal);
1711 } else if (Pred == FCmpInst::FCMP_ORD && FalseVal == CmpVal && IITrue &&
1712 CmpVal == matchFractPat(*IITrue)) {
1713 // !isnan(x) ? fract(x) : x
1714 Fract = applyFractPat(Builder, CmpVal);
1715 } else
1716 return false;
1717
1718 Fract->takeName(&I);
1719 I.replaceAllUsesWith(Fract);
1720 RecursivelyDeleteTriviallyDeadInstructions(&I, TLInfo);
1721 return true;
1722}
1723
1724static bool areInSameBB(const Value *A, const Value *B) {
1725 const auto *IA = dyn_cast<Instruction>(A);
1726 const auto *IB = dyn_cast<Instruction>(B);
1727 return IA && IB && IA->getParent() == IB->getParent();
1728}
1729
1730// Helper for breaking large PHIs that returns true when an extractelement on V
1731// is likely to be folded away by the DAG combiner.
1732 static bool isInterestingPHIIncomingValue(const Value *V) {
1733 const auto *FVT = dyn_cast<FixedVectorType>(V->getType());
1734 if (!FVT)
1735 return false;
1736
1737 const Value *CurVal = V;
1738
1739 // Check for insertelements, keeping track of the elements covered.
1740 BitVector EltsCovered(FVT->getNumElements());
1741 while (const auto *IE = dyn_cast<InsertElementInst>(CurVal)) {
1742 const auto *Idx = dyn_cast<ConstantInt>(IE->getOperand(2));
1743
1744 // A non-constant or out-of-bounds index -> folding is unlikely.
1745 // The latter is more of a sanity check because canonical IR should just
1746 // have replaced those with poison.
1747 if (!Idx || Idx->getSExtValue() >= FVT->getNumElements())
1748 return false;
1749
1750 const auto *VecSrc = IE->getOperand(0);
1751
1752 // If the vector source is another instruction, it must be in the same basic
1753 // block. Otherwise, the DAGCombiner won't see the whole thing and is
1754 // unlikely to be able to do anything interesting here.
1755 if (isa<Instruction>(VecSrc) && !areInSameBB(VecSrc, IE))
1756 return false;
1757
1758 CurVal = VecSrc;
1759 EltsCovered.set(Idx->getSExtValue());
1760
1761 // All elements covered.
1762 if (EltsCovered.all())
1763 return true;
1764 }
1765
1766 // We either didn't find a single insertelement, or the insertelement chain
1767 // ended before all elements were covered. Check for other interesting values.
1768
1769 // Constants are always interesting because we can just constant fold the
1770 // extractelements.
1771 if (isa<Constant>(CurVal))
1772 return true;
1773
1774 // shufflevector is likely to be profitable if either operand is a constant,
1775 // or if either source is in the same block.
1776 // This is because shufflevector is most often lowered as a series of
1777 // insert/extract elements anyway.
1778 if (const auto *SV = dyn_cast<ShuffleVectorInst>(CurVal)) {
1779 return isa<Constant>(SV->getOperand(1)) ||
1780 areInSameBB(SV, SV->getOperand(0)) ||
1781 areInSameBB(SV, SV->getOperand(1));
1782 }
1783
1784 return false;
1785}
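// An illustrative example with hypothetical values: for
//   %v0 = insertelement <2 x i32> poison, i32 %a, i64 0
//   %v1 = insertelement <2 x i32> %v0, i32 %b, i64 1
// built in the same block, every lane of %v1 is covered by a constant-index
// insertelement, so extractelements of %v1 are expected to fold away and %v1
// is considered an interesting incoming value.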
1786
1787static void collectPHINodes(const PHINode &I,
1788 SmallPtrSet<const PHINode *, 8> &SeenPHIs) {
1789 const auto [It, Inserted] = SeenPHIs.insert(&I);
1790 if (!Inserted)
1791 return;
1792
1793 for (const Value *Inc : I.incoming_values()) {
1794 if (const auto *PhiInc = dyn_cast<PHINode>(Inc))
1795 collectPHINodes(*PhiInc, SeenPHIs);
1796 }
1797
1798 for (const User *U : I.users()) {
1799 if (const auto *PhiU = dyn_cast<PHINode>(U))
1800 collectPHINodes(*PhiU, SeenPHIs);
1801 }
1802}
1803
1804bool AMDGPUCodeGenPrepareImpl::canBreakPHINode(const PHINode &I) {
1805 // Check in the cache first.
1806 if (const auto It = BreakPhiNodesCache.find(&I);
1807 It != BreakPhiNodesCache.end())
1808 return It->second;
1809
1810 // We consider PHI nodes as part of "chains", so given a PHI node I, we
1811 // recursively consider all its users and incoming values that are also PHI
1812 // nodes. We then make a decision about all of those PHIs at once. Either they
1813 // all get broken up, or none of them do. That way, we avoid cases where a
1814 // single PHI is/is not broken and we end up reforming/exploding a vector
1815 // multiple times, or even worse, doing it in a loop.
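// As an illustrative sketch with hypothetical blocks and values: given
//   %a = phi <8 x i16> [ %init, %entry ], [ %b, %latch ]
// in a loop header and
//   %b = phi <8 x i16> [ %a, %header ]
// in the latch, %a and %b belong to the same chain, so they are either both
// broken up or both left intact.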
1816 SmallPtrSet<const PHINode *, 8> WorkList;
1817 collectPHINodes(I, WorkList);
1818
1819#ifndef NDEBUG
1820 // Check that none of the PHI nodes in the worklist are in the map. If some of
1821 // them are, it means we're not good enough at collecting related PHIs.
1822 for (const PHINode *WLP : WorkList) {
1823 assert(BreakPhiNodesCache.count(WLP) == 0);
1824 }
1825#endif
1826
1827 // To consider a PHI profitable to break, we need to see some interesting
1828 // incoming values. At least 2/3rd (rounded up) of all PHIs in the worklist
1829 // must have one to consider all PHIs breakable.
1830 //
1831 // This threshold has been determined through performance testing.
1832 //
1833 // Note that the computation below is equivalent to
1834 //
1835 // (unsigned)ceil((K / 3.0) * 2)
1836 //
1837 // It's simply written this way to avoid mixing integral/FP arithmetic.
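// For example, with K = 4 PHIs in the worklist:
// alignTo(4 * 2, 3) / 3 == 9 / 3 == 3, matching (unsigned)ceil((4 / 3.0) * 2).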
1838 const auto Threshold = (alignTo(WorkList.size() * 2, 3) / 3);
1839 unsigned NumBreakablePHIs = 0;
1840 bool CanBreak = false;
1841 for (const PHINode *Cur : WorkList) {
1842 // Don't break PHIs that have no interesting incoming values. That is, where
1843 // there is no clear opportunity to fold the "extractelement" instructions
1844 // we would add.
1845 //
1846 // Note: InstCombine does not run after this pass, so we're only interested
1847 // in the foldings that the DAG combiner can do.
1848 if (any_of(Cur->incoming_values(), isInterestingPHIIncomingValue)) {
1849 if (++NumBreakablePHIs >= Threshold) {
1850 CanBreak = true;
1851 break;
1852 }
1853 }
1854 }
1855
1856 for (const PHINode *Cur : WorkList)
1857 BreakPhiNodesCache[Cur] = CanBreak;
1858
1859 return CanBreak;
1860}
1861
1862/// Helper class for "break large PHIs" (visitPHINode).
1863///
1864/// This represents a slice of a PHI's incoming value, which is made up of:
1865/// - The type of the slice (Ty)
1866/// - The index in the incoming value's vector where the slice starts (Idx)
1867/// - The number of elements in the slice (NumElts).
1868/// It also keeps track of the NewPHI node inserted for this particular slice.
1869///
1870/// Slice examples:
1871/// <4 x i64> -> Split into four i64 slices.
1872/// -> [i64, 0, 1], [i64, 1, 1], [i64, 2, 1], [i64, 3, 1]
1873/// <5 x i16> -> Split into 2 <2 x i16> slices + a i16 tail.
1874/// -> [<2 x i16>, 0, 2], [<2 x i16>, 2, 2], [i16, 4, 1]
1875 class VectorSlice {
1876 public:
1877 VectorSlice(Type *Ty, unsigned Idx, unsigned NumElts)
1878 : Ty(Ty), Idx(Idx), NumElts(NumElts) {}
1879
1880 Type *Ty = nullptr;
1881 unsigned Idx = 0;
1882 unsigned NumElts = 0;
1883 PHINode *NewPHI = nullptr;
1884
1885 /// Slice \p Inc according to the information contained within this slice.
1886 /// This is cached, so if called multiple times for the same \p BB & \p Inc
1887 /// pair, it returns the same Sliced value as well.
1888 ///
1889 /// Note this *intentionally* does not return the same value for, say,
1890 /// [%bb.0, %0] & [%bb.1, %0] as:
1891 /// - It could cause issues with dominance (e.g. if bb.1 is seen first, then
1892 /// the value in bb.1 may not be reachable from bb.0 if it's its
1893 /// predecessor.)
1894 /// - We also want to make our extract instructions as local as possible so
1895 /// the DAG has better chances of folding them out. Duplicating them like
1896 /// that is beneficial in that regard.
1897 ///
1898 /// This is both a minor optimization to avoid creating duplicate
1899 /// instructions and a requirement for correctness. It is not forbidden
1900 /// for a PHI node to have the same [BB, Val] pair multiple times. If we
1901 /// returned a new value each time, those previously identical pairs would all
1902 /// have different incoming values (from the same block) and it'd cause a "PHI
1903 /// node has multiple entries for the same basic block with different incoming
1904 /// values!" verifier error.
1905 Value *getSlicedVal(BasicBlock *BB, Value *Inc, StringRef NewValName) {
1906 Value *&Res = SlicedVals[{BB, Inc}];
1907 if (Res)
1908 return Res;
1909
1910 IRBuilder<> B(BB->getTerminator());
1911 if (Instruction *IncInst = dyn_cast<Instruction>(Inc))
1912 B.SetCurrentDebugLocation(IncInst->getDebugLoc());
1913
1914 if (NumElts > 1) {
1915 SmallVector<int, 4> Mask;
1916 for (unsigned K = Idx; K < (Idx + NumElts); ++K)
1917 Mask.push_back(K);
1918 Res = B.CreateShuffleVector(Inc, Mask, NewValName);
1919 } else
1920 Res = B.CreateExtractElement(Inc, Idx, NewValName);
1921
1922 return Res;
1923 }
1924
1925private:
1926 SmallDenseMap<std::pair<BasicBlock *, Value *>, Value *> SlicedVals;
1927};
1928
1929bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
1930 // Break up fixed-vector PHIs into smaller pieces.
1931 // The default threshold is 32, so this breaks up any vector that is >32 bits
1932 // into its elements, or into 32-bit pieces (for 8/16-bit elements).
1933 //
1934 // This is only helpful for DAGISel because it doesn't handle large PHIs as
1935 // well as GlobalISel. DAGISel lowers PHIs by using CopyToReg/CopyFromReg.
1936 // With large, odd-sized PHIs we may end up needing many `build_vector`
1937 // operations with most elements being "undef". This inhibits a lot of
1938 // optimization opportunities and can result in unreasonably high register
1939 // pressure and the inevitable stack spilling.
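// As an illustrative sketch with hypothetical values, a PHI such as
//   %v = phi <4 x i32> [ %a, %bb0 ], [ %b, %bb1 ]
// becomes four i32 PHIs whose incoming values are extractelements of %a and %b
// inserted in the predecessors, and %v is then rebuilt from those scalar PHIs
// with insertelement instructions.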
1940 if (!BreakLargePHIs || getCGPassBuilderOption().EnableGlobalISelOption)
1941 return false;
1942
1943 FixedVectorType *FVT = dyn_cast<FixedVectorType>(I.getType());
1944 if (!FVT || FVT->getNumElements() == 1 ||
1945 DL->getTypeSizeInBits(FVT) <= BreakLargePHIsThreshold)
1946 return false;
1947
1948 if (!ForceBreakLargePHIs && !canBreakPHINode(I))
1949 return false;
1950
1951 std::vector<VectorSlice> Slices;
1952
1953 Type *EltTy = FVT->getElementType();
1954 {
1955 unsigned Idx = 0;
1956 // For 8/16-bit element types, don't scalarize fully; break the vector into
1957 // as many 32-bit slices as we can, and scalarize the tail.
1958 const unsigned EltSize = DL->getTypeSizeInBits(EltTy);
1959 const unsigned NumElts = FVT->getNumElements();
1960 if (EltSize == 8 || EltSize == 16) {
1961 const unsigned SubVecSize = (32 / EltSize);
1962 Type *SubVecTy = FixedVectorType::get(EltTy, SubVecSize);
1963 for (unsigned End = alignDown(NumElts, SubVecSize); Idx < End;
1964 Idx += SubVecSize)
1965 Slices.emplace_back(SubVecTy, Idx, SubVecSize);
1966 }
1967
1968 // Scalarize all remaining elements.
1969 for (; Idx < NumElts; ++Idx)
1970 Slices.emplace_back(EltTy, Idx, 1);
1971 }
1972
1973 assert(Slices.size() > 1);
1974
1975 // Create one PHI per vector piece. The "VectorSlice" class takes care of
1976 // creating the necessary instructions to extract the relevant slices of each
1977 // incoming value.
1978 IRBuilder<> B(I.getParent());
1979 B.SetCurrentDebugLocation(I.getDebugLoc());
1980
1981 unsigned IncNameSuffix = 0;
1982 for (VectorSlice &S : Slices) {
1983 // We need to reset the builder on each iteration, because getSlicedVal may
1984 // have inserted something into I's BB.
1985 B.SetInsertPoint(I.getParent()->getFirstNonPHI());
1986 S.NewPHI = B.CreatePHI(S.Ty, I.getNumIncomingValues());
1987
1988 for (const auto &[Idx, BB] : enumerate(I.blocks())) {
1989 S.NewPHI->addIncoming(S.getSlicedVal(BB, I.getIncomingValue(Idx),
1990 "largephi.extractslice" +
1991 std::to_string(IncNameSuffix++)),
1992 BB);
1993 }
1994 }
1995
1996 // And replace this PHI with a vector of all the previous PHI values.
1997 Value *Vec = PoisonValue::get(FVT);
1998 unsigned NameSuffix = 0;
1999 for (VectorSlice &S : Slices) {
2000 const auto ValName = "largephi.insertslice" + std::to_string(NameSuffix++);
2001 if (S.NumElts > 1)
2002 Vec =
2003 B.CreateInsertVector(FVT, Vec, S.NewPHI, B.getInt64(S.Idx), ValName);
2004 else
2005 Vec = B.CreateInsertElement(Vec, S.NewPHI, S.Idx, ValName);
2006 }
2007
2008 I.replaceAllUsesWith(Vec);
2009 I.eraseFromParent();
2010 return true;
2011}
2012
2013bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
2014 switch (I.getIntrinsicID()) {
2015 case Intrinsic::bitreverse:
2016 return visitBitreverseIntrinsicInst(I);
2017 case Intrinsic::minnum:
2018 return visitMinNum(I);
2019 case Intrinsic::sqrt:
2020 return visitSqrt(I);
2021 default:
2022 return false;
2023 }
2024}
2025
2026bool AMDGPUCodeGenPrepareImpl::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
2027 bool Changed = false;
2028
2029 if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
2030 UA->isUniform(&I))
2031 Changed |= promoteUniformBitreverseToI32(I);
2032
2033 return Changed;
2034}
2035
2036 /// Match the non-nan fract pattern:
2037 ///   minnum(fsub(x, floor(x)), nextafter(1.0, -1.0))
2038 ///
2039 /// Only matches if fract is a useful instruction for the subtarget. Does not
2040 /// account for nan handling; the instruction has a nan check on the input value.
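// An illustrative f32 instance of the pattern above (hypothetical value %x;
// 0x3FEFFFFFE0000000 is nextafter(1.0, -1.0) for float, about 0.99999994):
//   %floor = call float @llvm.floor.f32(float %x)
//   %sub = fsub float %x, %floor
//   %min = call float @llvm.minnum.f32(float %sub, float 0x3FEFFFFFE0000000)
// matchFractPat would return %x for %min.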
2041Value *AMDGPUCodeGenPrepareImpl::matchFractPat(IntrinsicInst &I) {
2042 if (ST->hasFractBug())
2043 return nullptr;
2044
2045 if (I.getIntrinsicID() != Intrinsic::minnum)
2046 return nullptr;
2047
2048 Type *Ty = I.getType();
2049 if (!isLegalFloatingTy(Ty->getScalarType()))
2050 return nullptr;
2051
2052 Value *Arg0 = I.getArgOperand(0);
2053 Value *Arg1 = I.getArgOperand(1);
2054
2055 const APFloat *C;
2056 if (!match(Arg1, m_APFloat(C)))
2057 return nullptr;
2058
2059 APFloat One(1.0);
2060 bool LosesInfo;
2061 One.convert(C->getSemantics(), APFloat::rmNearestTiesToEven, &LosesInfo);
2062
2063 // Match nextafter(1.0, -1)
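// (For f32, for example, that value is 0x1.fffffep-1, about 0.99999994.)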
2064 One.next(true);
2065 if (One != *C)
2066 return nullptr;
2067
2068 Value *FloorSrc;
2069 if (match(Arg0, m_FSub(m_Value(FloorSrc),
2070 m_Intrinsic<Intrinsic::floor>(m_Deferred(FloorSrc)))))
2071 return FloorSrc;
2072 return nullptr;
2073}
2074
2075Value *AMDGPUCodeGenPrepareImpl::applyFractPat(IRBuilder<> &Builder,
2076 Value *FractArg) {
2077 SmallVector<Value *, 4> FractVals;
2078 extractValues(Builder, FractVals, FractArg);
2079
2080 SmallVector<Value *, 4> ResultVals(FractVals.size());
2081
2082 Type *Ty = FractArg->getType()->getScalarType();
2083 for (unsigned I = 0, E = FractVals.size(); I != E; ++I) {
2084 ResultVals[I] =
2085 Builder.CreateIntrinsic(Intrinsic::amdgcn_fract, {Ty}, {FractVals[I]});
2086 }
2087
2088 return insertValues(Builder, FractArg->getType(), ResultVals);
2089}
2090
2091bool AMDGPUCodeGenPrepareImpl::visitMinNum(IntrinsicInst &I) {
2092 Value *FractArg = matchFractPat(I);
2093 if (!FractArg)
2094 return false;
2095
2096 // Match pattern for fract intrinsic in contexts where the nan check has been
2097 // optimized out (and hope the knowledge the source can't be nan wasn't lost).
2098 if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, *DL, TLInfo))
2099 return false;
2100
2101 IRBuilder<> Builder(&I);
2102 FastMathFlags FMF = I.getFastMathFlags();
2103 FMF.setNoNaNs();
2104 Builder.setFastMathFlags(FMF);
2105
2106 Value *Fract = applyFractPat(Builder, FractArg);
2107 Fract->takeName(&I);
2108 I.replaceAllUsesWith(Fract);
2109
2110 RecursivelyDeleteTriviallyDeadInstructions(&I, TLInfo);
2111 return true;
2112}
2113
2114static bool isOneOrNegOne(const Value *Val) {
2115 const APFloat *C;
2116 return match(Val, m_APFloat(C)) && C->getExactLog2Abs() == 0;
2117}
2118
2119// Expand llvm.sqrt.f32 calls with !fpmath metadata in a semi-fast way.
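// For reference, an illustrative call shape (hypothetical value and metadata)
// that this targets:
//   %r = call float @llvm.sqrt.f32(float %x), !fpmath !0
//   !0 = !{float 2.500000e+00}
// i.e. a plain f32 sqrt with a relaxed accuracy request (>= 1.0 ulp) that is
// not already marked afn.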
2120bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
2121 Type *Ty = Sqrt.getType()->getScalarType();
2122 if (!Ty->isFloatTy() && (!Ty->isHalfTy() || ST->has16BitInsts()))
2123 return false;
2124
2125 const FPMathOperator *FPOp = cast<const FPMathOperator>(&Sqrt);
2126 FastMathFlags SqrtFMF = FPOp->getFastMathFlags();
2127
2128 // We're trying to handle the fast-but-not-that-fast case only. The lowering
2129 // of fast llvm.sqrt will give the raw instruction anyway.
2130 if (SqrtFMF.approxFunc() || HasUnsafeFPMath)
2131 return false;
2132
2133 const float ReqdAccuracy = FPOp->getFPAccuracy();
2134
2135 // Defer correctly rounded expansion to codegen.
2136 if (ReqdAccuracy < 1.0f)
2137 return false;
2138
2139 // FIXME: This is an ugly hack for this pass using forward iteration instead
2140 // of reverse. If it worked like a normal combiner, the rsq would form before
2141 // we saw a sqrt call.
2142 auto *FDiv =
2143 dyn_cast_or_null<FPMathOperator>(Sqrt.getUniqueUndroppableUser());
2144 if (FDiv && FDiv->getOpcode() == Instruction::FDiv &&
2145 FDiv->getFPAccuracy() >= 1.0f &&
2146 canOptimizeWithRsq(FPOp, FDiv->getFastMathFlags(), SqrtFMF) &&
2147 // TODO: We should also handle the arcp case for the fdiv with non-1 value
2148 isOneOrNegOne(FDiv->getOperand(0)))
2149 return false;
2150
2151 Value *SrcVal = Sqrt.getOperand(0);
2152 bool CanTreatAsDAZ = canIgnoreDenormalInput(SrcVal, &Sqrt);
2153
2154 // The raw instruction is 1 ulp, but the correction for denormal handling
2155 // brings it to 2.
2156 if (!CanTreatAsDAZ && ReqdAccuracy < 2.0f)
2157 return false;
2158
2159 IRBuilder<> Builder(&Sqrt);
2160 SmallVector<Value *, 4> SrcVals;
2161 extractValues(Builder, SrcVals, SrcVal);
2162
2163 SmallVector<Value *, 4> ResultVals(SrcVals.size());
2164 for (int I = 0, E = SrcVals.size(); I != E; ++I) {
2165 if (CanTreatAsDAZ)
2166 ResultVals[I] = Builder.CreateCall(getSqrtF32(), SrcVals[I]);
2167 else
2168 ResultVals[I] = emitSqrtIEEE2ULP(Builder, SrcVals[I], SqrtFMF);
2169 }
2170
2171 Value *NewSqrt = insertValues(Builder, Sqrt.getType(), ResultVals);
2172 NewSqrt->takeName(&Sqrt);
2173 Sqrt.replaceAllUsesWith(NewSqrt);
2174 Sqrt.eraseFromParent();
2175 return true;
2176}
2177
2178bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
2179 Impl.Mod = &M;
2180 Impl.DL = &Impl.Mod->getDataLayout();
2181 Impl.SqrtF32 = nullptr;
2182 Impl.LdexpF32 = nullptr;
2183 return false;
2184}
2185
2186bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
2187 if (skipFunction(F))
2188 return false;
2189
2190 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
2191 if (!TPC)
2192 return false;
2193
2194 const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
2195 Impl.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2196 Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
2197 Impl.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2198 Impl.UA = &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
2199 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
2200 Impl.DT = DTWP ? &DTWP->getDomTree() : nullptr;
2201 Impl.HasUnsafeFPMath = hasUnsafeFPMath(F);
2202 SIModeRegisterDefaults Mode(F, *Impl.ST);
2203 Impl.HasFP32DenormalFlush =
2204 Mode.FP32Denormals == DenormalMode::getPreserveSign();
2205 return Impl.run(F);
2206}
2207
2208 PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
2209 FunctionAnalysisManager &FAM) {
2210 AMDGPUCodeGenPrepareImpl Impl;
2211 Impl.Mod = F.getParent();
2212 Impl.DL = &Impl.Mod->getDataLayout();
2213 Impl.TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
2214 Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
2215 Impl.AC = &FAM.getResult<AssumptionAnalysis>(F);
2216 Impl.UA = &FAM.getResult<UniformityInfoAnalysis>(F);
2217 Impl.DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
2218 Impl.HasUnsafeFPMath = hasUnsafeFPMath(F);
2219 SIModeRegisterDefaults Mode(F, *Impl.ST);
2220 Impl.HasFP32DenormalFlush =
2221 Mode.FP32Denormals == DenormalMode::getPreserveSign();
2222 PreservedAnalyses PA = PreservedAnalyses::none();
2223 if (!Impl.FlowChanged)
2224 PA.preserveSet<CFGAnalyses>();
2225 return Impl.run(F) ? PA : PreservedAnalyses::all();
2226}
2227
2228INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
2229 "AMDGPU IR optimizations", false, false)
2230 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2231 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2232 INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
2233 INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
2234 false, false)
2235
2236char AMDGPUCodeGenPrepare::ID = 0;
2237
2238 FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
2239 return new AMDGPUCodeGenPrepare();
2240}