X86PartialReduction.cpp
//===-- X86PartialReduction.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for add instructions used by a horizontal reduction to see
// if we might be able to use pmaddwd or psadbw. Some cases of this require
// cross basic block knowledge and can't be done in SelectionDAG.
//
//===----------------------------------------------------------------------===//

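// Illustrative only (all value names below are hypothetical): the shape of IR
// this pass is after is a shuffle-based horizontal add reduction whose leaves
// can be rewritten into pmaddwd- or psadbw-friendly form, roughly:
//
//   %mul  = mul <8 x i32> %a.sext, %b.sext       ; operands sign-extended i16
//   %sh0  = shufflevector <8 x i32> %mul, <8 x i32> poison,
//                         <8 x i32> <i32 4, i32 5, i32 6, i32 7, ...>
//   %add0 = add <8 x i32> %mul, %sh0
//   ...                                          ; log2(8) shuffle+add stages
//   %res  = extractelement <8 x i32> %add2, i64 0
//
// matchAddReduction() recognizes the pyramid ending at the extractelement,
// collectLeaves() walks back to the leaves (the multiply above), and the
// tryMAddReplacement/trySADReplacement helpers rewrite those leaves.
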
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Analysis.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "x86-partial-reduction"

namespace {

class X86PartialReduction {
  const X86TargetMachine *TM;
  const DataLayout *DL = nullptr;
  const X86Subtarget *ST = nullptr;

public:
  X86PartialReduction(const X86TargetMachine *TM) : TM(TM) {}
  bool run(Function &F);

private:
  bool tryMAddReplacement(Instruction *Op, bool ReduceInOneBB);
  bool trySADReplacement(Instruction *Op);
};

class X86PartialReductionLegacy : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid.

  X86PartialReductionLegacy() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override { return "X86 Partial Reduction"; }
};
}

FunctionPass *llvm::createX86PartialReductionLegacyPass() {
  return new X86PartialReductionLegacy();
}

char X86PartialReductionLegacy::ID = 0;

INITIALIZE_PASS(X86PartialReductionLegacy, DEBUG_TYPE, "X86 Partial Reduction",
                false, false)

// This function should be aligned with detectExtMul() in X86ISelLowering.cpp.
static bool matchVPDPBUSDPattern(const X86Subtarget *ST, BinaryOperator *Mul,
                                 const DataLayout *DL) {
  if (!ST->hasVNNI() && !ST->hasAVXVNNI())
    return false;

  Value *LHS = Mul->getOperand(0);
  Value *RHS = Mul->getOperand(1);

  if (isa<SExtInst>(LHS))
    std::swap(LHS, RHS);

  auto IsFreeTruncation = [&](Value *Op) {
    if (auto *Cast = dyn_cast<CastInst>(Op)) {
      if (Cast->getParent() == Mul->getParent() &&
          (Cast->getOpcode() == Instruction::SExt ||
           Cast->getOpcode() == Instruction::ZExt) &&
          Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 8)
        return true;
    }

    return isa<Constant>(Op);
  };

  // (dpbusd (zext a), (sext b)). Since the first operand should be an unsigned
  // value, we need to check that LHS is a zero-extended value. RHS should be a
  // signed value, so we just check the sign bits.
  if ((IsFreeTruncation(LHS) &&
       computeKnownBits(LHS, *DL).countMaxActiveBits() <= 8) &&
      (IsFreeTruncation(RHS) && ComputeMaxSignificantBits(RHS, *DL) <= 8))
    return true;

  return false;
}
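
// Illustrative only (value names are hypothetical): the shape recognized above
// corresponds to a leaf multiply such as
//
//   %a32 = zext <16 x i8> %a to <16 x i32>     ; unsigned, <= 8 active bits
//   %b32 = sext <16 x i8> %b to <16 x i32>     ; signed, <= 8 significant bits
//   %mul = mul <16 x i32> %a32, %b32
//
// With VNNI/AVX-VNNI available and the whole reduction in one basic block,
// tryMAddReplacement() backs off below so that SelectionDAG can combine the
// reduction into VPDPBUSD instead.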

bool X86PartialReduction::tryMAddReplacement(Instruction *Op,
                                             bool ReduceInOneBB) {
  if (!ST->hasSSE2())
    return false;

  // Need at least 8 elements.
  if (cast<FixedVectorType>(Op->getType())->getNumElements() < 8)
    return false;

  // Element type should be i32.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  auto *Mul = dyn_cast<BinaryOperator>(Op);
  if (!Mul || Mul->getOpcode() != Instruction::Mul)
    return false;

  Value *LHS = Mul->getOperand(0);
  Value *RHS = Mul->getOperand(1);

  // If the target supports VNNI, leave it to ISel to combine the reduce
  // operation into a VNNI instruction.
  // TODO: we can support transforming a cross-block reduce to the VNNI
  // intrinsic in this pass.
  if (ReduceInOneBB && matchVPDPBUSDPattern(ST, Mul, DL))
    return false;

  // LHS and RHS should only be used once, or if they are the same then only
  // used twice. Only check this when SSE4.1 is enabled and we have zext/sext
  // instructions, otherwise we use punpck to emulate zero extend in stages. The
  // trunc we need to do likely won't introduce new instructions in that case.
  if (ST->hasSSE41()) {
    if (LHS == RHS) {
      if (!isa<Constant>(LHS) && !LHS->hasNUses(2))
        return false;
    } else {
      if (!isa<Constant>(LHS) && !LHS->hasOneUse())
        return false;
      if (!isa<Constant>(RHS) && !RHS->hasOneUse())
        return false;
    }
  }

  auto CanShrinkOp = [&](Value *Op) {
    auto IsFreeTruncation = [&](Value *Op) {
      if (auto *Cast = dyn_cast<CastInst>(Op)) {
        if (Cast->getParent() == Mul->getParent() &&
            (Cast->getOpcode() == Instruction::SExt ||
             Cast->getOpcode() == Instruction::ZExt) &&
            Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 16)
          return true;
      }

      return isa<Constant>(Op);
    };

    // If the operation can be freely truncated and has enough sign bits we
    // can shrink.
    if (IsFreeTruncation(Op) && ComputeNumSignBits(Op, *DL, nullptr, Mul) > 16)
      return true;

    // SelectionDAG has limited support for truncating through an add or sub if
    // the inputs are freely truncatable.
    if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
      if (BO->getParent() == Mul->getParent() &&
          IsFreeTruncation(BO->getOperand(0)) &&
          IsFreeTruncation(BO->getOperand(1)) &&
          ComputeNumSignBits(Op, *DL, nullptr, Mul) > 16)
        return true;
    }

    return false;
  };

  // Both Ops need to be shrinkable.
  if (!CanShrinkOp(LHS) && !CanShrinkOp(RHS))
    return false;

  IRBuilder<> Builder(Mul);

  auto *MulTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = MulTy->getNumElements();

  // Extract even elements and odd elements and add them together. This will
  // be pattern matched by SelectionDAG to pmaddwd. This instruction will be
  // half the original width.
  SmallVector<int, 16> EvenMask(NumElts / 2);
  SmallVector<int, 16> OddMask(NumElts / 2);
  for (int i = 0, e = NumElts / 2; i != e; ++i) {
    EvenMask[i] = i * 2;
    OddMask[i] = i * 2 + 1;
  }
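
  // Illustrative only: for NumElts == 8 the loop above builds
  // EvenMask = <0, 2, 4, 6> and OddMask = <1, 3, 5, 7>, so the two shuffles
  // below select the even- and odd-indexed products and the add pairs them
  // up, which is the form SelectionDAG pattern-matches to pmaddwd.
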
  // Creating a new mul so the replaceAllUsesWith below doesn't replace the
  // uses in the shuffles we're creating.
  Value *NewMul = Builder.CreateMul(Mul->getOperand(0), Mul->getOperand(1));
  Value *EvenElts = Builder.CreateShuffleVector(NewMul, NewMul, EvenMask);
  Value *OddElts = Builder.CreateShuffleVector(NewMul, NewMul, OddMask);
  Value *MAdd = Builder.CreateAdd(EvenElts, OddElts);

  // Concatenate zeroes to extend back to the original type.
  SmallVector<int, 32> ConcatMask(NumElts);
  std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
  Value *Zero = Constant::getNullValue(MAdd->getType());
  Value *Concat = Builder.CreateShuffleVector(MAdd, Zero, ConcatMask);

  Mul->replaceAllUsesWith(Concat);
  Mul->eraseFromParent();

  return true;
}

bool X86PartialReduction::trySADReplacement(Instruction *Op) {
  if (!ST->hasSSE2())
    return false;

  // TODO: There's nothing special about i32, any integer type above i16 should
  // work just as well.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  Value *LHS;
  if (match(Op, PatternMatch::m_Intrinsic<Intrinsic::abs>())) {
    LHS = Op->getOperand(0);
  } else {
    // Operand should be a select.
    auto *SI = dyn_cast<SelectInst>(Op);
    if (!SI)
      return false;

    Value *RHS;
    // Select needs to implement absolute value.
    auto SPR = matchSelectPattern(SI, LHS, RHS);
    if (SPR.Flavor != SPF_ABS)
      return false;
  }

  // Need a subtract of two values.
  auto *Sub = dyn_cast<BinaryOperator>(LHS);
  if (!Sub || Sub->getOpcode() != Instruction::Sub)
    return false;

  // Look for zero extend from i8.
  auto getZeroExtendedVal = [](Value *Op) -> Value * {
    if (auto *ZExt = dyn_cast<ZExtInst>(Op))
      if (cast<VectorType>(ZExt->getOperand(0)->getType())
              ->getElementType()
              ->isIntegerTy(8))
        return ZExt->getOperand(0);

    return nullptr;
  };

  // Both operands of the subtract should be extends from vXi8.
  Value *Op0 = getZeroExtendedVal(Sub->getOperand(0));
  Value *Op1 = getZeroExtendedVal(Sub->getOperand(1));
  if (!Op0 || !Op1)
    return false;

  IRBuilder<> Builder(Op);

  auto *OpTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = OpTy->getNumElements();

  unsigned IntrinsicNumElts;
  Intrinsic::ID IID;
  if (ST->hasBWI() && NumElts >= 64) {
    IID = Intrinsic::x86_avx512_psad_bw_512;
    IntrinsicNumElts = 64;
  } else if (ST->hasAVX2() && NumElts >= 32) {
    IID = Intrinsic::x86_avx2_psad_bw;
    IntrinsicNumElts = 32;
  } else {
    IID = Intrinsic::x86_sse2_psad_bw;
    IntrinsicNumElts = 16;
  }

  Function *PSADBWFn = Intrinsic::getOrInsertDeclaration(Op->getModule(), IID);

  if (NumElts < 16) {
    // Pad input with zeroes.
    SmallVector<int, 32> ConcatMask(16);
    for (unsigned i = 0; i != NumElts; ++i)
      ConcatMask[i] = i;
    for (unsigned i = NumElts; i != 16; ++i)
      ConcatMask[i] = (i % NumElts) + NumElts;

    Value *Zero = Constant::getNullValue(Op0->getType());
    Op0 = Builder.CreateShuffleVector(Op0, Zero, ConcatMask);
    Op1 = Builder.CreateShuffleVector(Op1, Zero, ConcatMask);
    NumElts = 16;
  }
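
  // Illustrative only: for a v8i8 input (NumElts == 8) the mask above is
  // <0, 1, ..., 15>, where indices 8..15 select lanes of the all-zero second
  // shuffle operand, widening both inputs to the 16-byte minimum psadbw
  // operates on without changing the final sum.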

  // Intrinsics produce vXi64 and need to be cast to vXi32.
  auto *I32Ty =
      FixedVectorType::get(Builder.getInt32Ty(), IntrinsicNumElts / 4);

  assert(NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!");
  unsigned NumSplits = NumElts / IntrinsicNumElts;

  // First collect the pieces we need.
  SmallVector<Value *, 4> Ops(NumSplits);
  for (unsigned i = 0; i != NumSplits; ++i) {
    SmallVector<int, 64> ExtractMask(IntrinsicNumElts);
    std::iota(ExtractMask.begin(), ExtractMask.end(), i * IntrinsicNumElts);
    Value *ExtractOp0 = Builder.CreateShuffleVector(Op0, Op0, ExtractMask);
    Value *ExtractOp1 = Builder.CreateShuffleVector(Op1, Op0, ExtractMask);
    Ops[i] = Builder.CreateCall(PSADBWFn, {ExtractOp0, ExtractOp1});
    Ops[i] = Builder.CreateBitCast(Ops[i], I32Ty);
  }

  assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits");
  unsigned Stages = Log2_32(NumSplits);
  for (unsigned s = Stages; s > 0; --s) {
    unsigned NumConcatElts =
        cast<FixedVectorType>(Ops[0]->getType())->getNumElements() * 2;
    for (unsigned i = 0; i != 1U << (s - 1); ++i) {
      SmallVector<int, 64> ConcatMask(NumConcatElts);
      std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
      Ops[i] = Builder.CreateShuffleVector(Ops[i*2], Ops[i*2+1], ConcatMask);
    }
  }

  // At this point the final value should be in Ops[0]. Now we need to adjust
  // it to the final original type.
  NumElts = cast<FixedVectorType>(OpTy)->getNumElements();
  if (NumElts == 2) {
    // Extract down to 2 elements.
    Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{0, 1});
  } else if (NumElts >= 8) {
    SmallVector<int, 32> ConcatMask(NumElts);
    unsigned SubElts =
        cast<FixedVectorType>(Ops[0]->getType())->getNumElements();
    for (unsigned i = 0; i != SubElts; ++i)
      ConcatMask[i] = i;
    for (unsigned i = SubElts; i != NumElts; ++i)
      ConcatMask[i] = (i % SubElts) + SubElts;

    Value *Zero = Constant::getNullValue(Ops[0]->getType());
    Ops[0] = Builder.CreateShuffleVector(Ops[0], Zero, ConcatMask);
  }

  Op->replaceAllUsesWith(Ops[0]);
  Op->eraseFromParent();

  return true;
}
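
// Illustrative only (value names are hypothetical): given an absolute
// difference leaf such as
//
//   %a32 = zext <16 x i8> %a to <16 x i32>
//   %b32 = zext <16 x i8> %b to <16 x i32>
//   %sub = sub <16 x i32> %a32, %b32
//   %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 false)
//
// feeding an add reduction, trySADReplacement() replaces it with calls to the
// appropriate psadbw intrinsic on %a and %b and shuffles the vXi64 partial
// sums back to the original i32 vector width, leaving only the horizontal add
// of those partial sums for SelectionDAG.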

// Walk backwards from the ExtractElementInst and determine if it is the end of
// a horizontal reduction. Return the input to the reduction if we find one.
static Value *matchAddReduction(const ExtractElementInst &EE,
                                bool &ReduceInOneBB) {
  ReduceInOneBB = true;
  // Make sure we're extracting index 0.
  auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
  if (!Index || !Index->isNullValue())
    return nullptr;

  const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
  if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
    return nullptr;
  if (EE.getParent() != BO->getParent())
    ReduceInOneBB = false;

  unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(NumElems))
    return nullptr;

  const Value *Op = BO;
  unsigned Stages = Log2_32(NumElems);
  for (unsigned i = 0; i != Stages; ++i) {
    const auto *BO = dyn_cast<BinaryOperator>(Op);
    if (!BO || BO->getOpcode() != Instruction::Add)
      return nullptr;
    if (EE.getParent() != BO->getParent())
      ReduceInOneBB = false;

    // If this isn't the first add, then it should only have 2 users, the
    // shuffle and another add which we checked in the previous iteration.
    if (i != 0 && !BO->hasNUses(2))
      return nullptr;

    Value *LHS = BO->getOperand(0);
    Value *RHS = BO->getOperand(1);

    auto *Shuffle = dyn_cast<ShuffleVectorInst>(LHS);
    if (Shuffle) {
      Op = RHS;
    } else {
      Shuffle = dyn_cast<ShuffleVectorInst>(RHS);
      Op = LHS;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the bin op.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return nullptr;

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    unsigned MaskEnd = 1 << i;
    for (unsigned Index = 0; Index < MaskEnd; ++Index)
      if (Shuffle->getMaskValue(Index) != (int)(MaskEnd + Index))
        return nullptr;
  }

  return const_cast<Value *>(Op);
}
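
// Illustrative only: walking a v8i32 reduction backwards from the
// extractelement, the per-stage shuffle masks this matcher expects start with
//
//   stage 0 (i == 0): mask begins <1, ...>
//   stage 1 (i == 1): mask begins <2, 3, ...>
//   stage 2 (i == 2): mask begins <4, 5, 6, 7, ...>
//
// i.e. in forward order each step adds the upper half of the remaining
// elements onto the lower half, the standard log2(N) shuffle-reduce pyramid.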

// See if this BO is reachable from this Phi by walking forward through single
// use BinaryOperators with the same opcode. If we get back then we know we've
// found a loop and it is safe to step through this Add to find more leaves.
static bool isReachableFromPHI(PHINode *Phi, BinaryOperator *BO) {
  // The PHI itself should only have one use.
  if (!Phi->hasOneUse())
    return false;

  Instruction *U = cast<Instruction>(*Phi->user_begin());
  if (U == BO)
    return true;

  while (U->hasOneUse() && U->getOpcode() == BO->getOpcode())
    U = cast<Instruction>(*U->user_begin());

  return U == BO;
}
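
// Illustrative only (names are hypothetical): this covers the common
// vectorized-loop accumulator shape
//
//   loop:
//     %acc      = phi <8 x i32> [ zeroinitializer, %entry ], [ %acc.next, %loop ]
//     %prod     = mul <8 x i32> %x, %y
//     %acc.next = add <8 x i32> %acc, %prod
//     ...
//
// where the add is reached again by walking forward from the phi, so the leaf
// collection below may safely look through it.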

// Collect all the leaves of the tree of adds that feeds into the horizontal
// reduction. Root is the Value that is used by the horizontal reduction.
// We look through single use phis, single use adds, or adds that are used by
// a phi that forms a loop with the add.
static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
  SmallVector<Value *, 8> Worklist;
  SmallPtrSet<Value *, 8> Visited;
  Worklist.push_back(Root);

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (auto *PN = dyn_cast<PHINode>(V)) {
      // PHI node should have a single use unless it is the root node, then it
      // has 2 uses.
      if (!PN->hasNUses(PN == Root ? 2 : 1))
        break;

      // Push incoming values to the worklist.
      append_range(Worklist, PN->incoming_values());

      continue;
    }

    if (auto *BO = dyn_cast<BinaryOperator>(V)) {
      if (BO->getOpcode() == Instruction::Add) {
        // Simple case. Single use, just push its operands to the worklist.
        if (BO->hasNUses(BO == Root ? 2 : 1)) {
          append_range(Worklist, BO->operands());
          continue;
        }

        // If there is an additional use, make sure it is an unvisited phi that
        // gets us back to this node.
        if (BO->hasNUses(BO == Root ? 3 : 2)) {
          PHINode *PN = nullptr;
          for (auto *U : BO->users())
            if (auto *P = dyn_cast<PHINode>(U))
              if (!Visited.count(P))
                PN = P;

          // If we didn't find a 2-input PHI then this isn't a case we can
          // handle.
          if (!PN || PN->getNumIncomingValues() != 2)
            continue;

          // Walk forward from this phi to see if it reaches back to this add.
          if (!isReachableFromPHI(PN, BO))
            continue;

          // The phi forms a loop with this Add, push its operands.
          append_range(Worklist, BO->operands());
        }
      }
    }

    // Not an add or phi, make it a leaf.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (!V->hasNUses(I == Root ? 2 : 1))
        continue;

      // Add this as a leaf.
      Leaves.push_back(I);
    }
  }
}
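
// Illustrative only (names are hypothetical): for a reduction whose input is
// built up as
//
//   %t0 = add <8 x i32> %mul0, %mul1
//   %t1 = add <8 x i32> %t0, %mul2
//
// collectLeaves(%t1, Leaves) looks through the single-use adds and collects
// {%mul0, %mul1, %mul2}, and run() below then tries the pmaddwd/psadbw
// rewrites on each leaf.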

bool X86PartialReduction::run(Function &F) {
  ST = TM->getSubtargetImpl(F);
  DL = &F.getDataLayout();

  bool MadeChange = false;
  for (auto &BB : F) {
    for (auto &I : BB) {
      auto *EE = dyn_cast<ExtractElementInst>(&I);
      if (!EE)
        continue;

      bool ReduceInOneBB;
      // First find a reduction tree.
      // FIXME: Do we need to handle other opcodes than Add?
      Value *Root = matchAddReduction(*EE, ReduceInOneBB);
      if (!Root)
        continue;

      SmallVector<Instruction *, 8> Leaves;
      collectLeaves(Root, Leaves);

      for (Instruction *I : Leaves) {
        if (tryMAddReplacement(I, ReduceInOneBB)) {
          MadeChange = true;
          continue;
        }

        // Don't do SAD matching on the root node. SelectionDAG already
        // has support for that and currently generates better code.
        if (I != Root && trySADReplacement(I))
          MadeChange = true;
      }
    }
  }

  return MadeChange;
}

bool X86PartialReductionLegacy::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  return X86PartialReduction(&TPC->getTM<X86TargetMachine>()).run(F);
}

PreservedAnalyses X86PartialReductionPass::run(Function &F,
                                               FunctionAnalysisManager &FAM) {
  bool Changed = X86PartialReduction(TM).run(F);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}