AggressiveInstCombine.cpp (LLVM 19.0.0git)
1//===- AggressiveInstCombine.cpp ------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the aggressive expression pattern combiner classes.
10// Currently, it handles expression patterns for:
11// * Truncate instruction
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
16#include "AggressiveInstCombineInternal.h"
17#include "llvm/ADT/Statistic.h"
18#include "llvm/Analysis/AliasAnalysis.h"
19#include "llvm/Analysis/AssumptionCache.h"
20#include "llvm/Analysis/BasicAliasAnalysis.h"
21#include "llvm/Analysis/ConstantFolding.h"
22#include "llvm/Analysis/GlobalsModRef.h"
23#include "llvm/Analysis/TargetLibraryInfo.h"
24#include "llvm/Analysis/TargetTransformInfo.h"
25#include "llvm/Analysis/ValueTracking.h"
26#include "llvm/IR/DataLayout.h"
27#include "llvm/IR/Dominators.h"
28#include "llvm/IR/Function.h"
29#include "llvm/IR/IRBuilder.h"
30#include "llvm/IR/PatternMatch.h"
31#include "llvm/Transforms/Utils/BuildLibCalls.h"
32#include "llvm/Transforms/Utils/Local.h"
33
34using namespace llvm;
35using namespace PatternMatch;
36
37#define DEBUG_TYPE "aggressive-instcombine"
38
39STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
40STATISTIC(NumGuardedRotates,
41 "Number of guarded rotates transformed into funnel shifts");
42STATISTIC(NumGuardedFunnelShifts,
43 "Number of guarded funnel shifts transformed into funnel shifts");
44STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");
45
46 static cl::opt<unsigned> MaxInstrsToScan(
47     "aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden,
48 cl::desc("Max number of instructions to scan for aggressive instcombine."));
49
50/// Match a pattern for a bitwise funnel/rotate operation that partially guards
51/// against undefined behavior by branching around the funnel-shift/rotation
52/// when the shift amount is 0.
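/// For illustration, the kind of source that typically produces this pattern
/// is the classic "safe rotate" idiom, lowered to a branch plus a phi:
///   unsigned safe_rotl(unsigned X, unsigned Sh) {
///     return Sh == 0 ? X : (X << Sh) | (X >> (32 - Sh));
///   }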
53 static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
54   if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
55 return false;
56
57 // As with the one-use checks below, this is not strictly necessary, but we
58 // are being cautious to avoid potential perf regressions on targets that
59 // do not actually have a funnel/rotate instruction (where the funnel shift
60 // would be expanded back into math/shift/logic ops).
61 if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
62 return false;
63
64 // Match V to funnel shift left/right and capture the source operands and
65 // shift amount.
66 auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
67 Value *&ShAmt) {
68 unsigned Width = V->getType()->getScalarSizeInBits();
69
70 // fshl(ShVal0, ShVal1, ShAmt)
71   //  == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt))
72 if (match(V, m_OneUse(m_c_Or(
73 m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
74 m_LShr(m_Value(ShVal1),
75 m_Sub(m_SpecificInt(Width), m_Deferred(ShAmt))))))) {
76 return Intrinsic::fshl;
77 }
78
79 // fshr(ShVal0, ShVal1, ShAmt)
80   //  == (ShVal0 << (Width - ShAmt)) | (ShVal1 >> ShAmt)
81 if (match(V,
82               m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
83                                                            m_Value(ShAmt))),
84 m_LShr(m_Value(ShVal1), m_Deferred(ShAmt)))))) {
85 return Intrinsic::fshr;
86 }
87
88     return Intrinsic::not_intrinsic;
89   };
90
91 // One phi operand must be a funnel/rotate operation, and the other phi
92 // operand must be the source value of that funnel/rotate operation:
93 // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
94 // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
95 // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
96 PHINode &Phi = cast<PHINode>(I);
97 unsigned FunnelOp = 0, GuardOp = 1;
98 Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
99 Value *ShVal0, *ShVal1, *ShAmt;
100 Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
101 if (IID == Intrinsic::not_intrinsic ||
102 (IID == Intrinsic::fshl && ShVal0 != P1) ||
103 (IID == Intrinsic::fshr && ShVal1 != P1)) {
104 IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
105 if (IID == Intrinsic::not_intrinsic ||
106 (IID == Intrinsic::fshl && ShVal0 != P0) ||
107 (IID == Intrinsic::fshr && ShVal1 != P0))
108 return false;
109 assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
110 "Pattern must match funnel shift left or right");
111 std::swap(FunnelOp, GuardOp);
112 }
113
114 // The incoming block with our source operand must be the "guard" block.
115 // That must contain a cmp+branch to avoid the funnel/rotate when the shift
116 // amount is equal to 0. The other incoming block is the block with the
117 // funnel/rotate.
118 BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
119 BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
120 Instruction *TermI = GuardBB->getTerminator();
121
122 // Ensure that the shift values dominate each block.
123 if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
124 return false;
125
126   CmpInst::Predicate Pred;
127   BasicBlock *PhiBB = Phi.getParent();
128 if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()),
129 m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
130 return false;
131
132 if (Pred != CmpInst::ICMP_EQ)
133 return false;
134
135 IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
136
137 if (ShVal0 == ShVal1)
138 ++NumGuardedRotates;
139 else
140 ++NumGuardedFunnelShifts;
141
142 // If this is not a rotate then the select was blocking poison from the
143 // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
144 bool IsFshl = IID == Intrinsic::fshl;
145 if (ShVal0 != ShVal1) {
146 if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
147 ShVal1 = Builder.CreateFreeze(ShVal1);
148 else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
149 ShVal0 = Builder.CreateFreeze(ShVal0);
150 }
151
152 // We matched a variation of this IR pattern:
153 // GuardBB:
154 // %cmp = icmp eq i32 %ShAmt, 0
155 // br i1 %cmp, label %PhiBB, label %FunnelBB
156 // FunnelBB:
157 // %sub = sub i32 32, %ShAmt
158 // %shr = lshr i32 %ShVal1, %sub
159 // %shl = shl i32 %ShVal0, %ShAmt
160 // %fsh = or i32 %shr, %shl
161 // br label %PhiBB
162 // PhiBB:
163 // %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
164 // -->
165 // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
166 Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
167 Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
168 return true;
169}
170
171/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
172/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
173/// of 'and' ops, then we also need to capture the fact that we saw an
174/// "and X, 1", so that's an extra return value for that case.
175struct MaskOps {
176 Value *Root = nullptr;
177   APInt Mask;
178   bool MatchAndChain;
179   bool FoundAnd1 = false;
180
181 MaskOps(unsigned BitWidth, bool MatchAnds)
182 : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {}
183};
184
185/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
186/// chain of 'and' or 'or' instructions looking for shift ops of a common source
187/// value. Examples:
188/// or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
189/// returns { X, 0x129 }
190/// and (and (X >> 1), 1), (X >> 4)
191/// returns { X, 0x12 }
192static bool matchAndOrChain(Value *V, MaskOps &MOps) {
193 Value *Op0, *Op1;
194 if (MOps.MatchAndChain) {
195 // Recurse through a chain of 'and' operands. This requires an extra check
196 // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
197 // in the chain to know that all of the high bits are cleared.
198 if (match(V, m_And(m_Value(Op0), m_One()))) {
199 MOps.FoundAnd1 = true;
200 return matchAndOrChain(Op0, MOps);
201 }
202 if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
203 return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
204 } else {
205 // Recurse through a chain of 'or' operands.
206 if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
207 return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
208 }
209
210 // We need a shift-right or a bare value representing a compare of bit 0 of
211 // the original source operand.
212 Value *Candidate;
213 const APInt *BitIndex = nullptr;
214 if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
215 Candidate = V;
216
217 // Initialize result source operand.
218 if (!MOps.Root)
219 MOps.Root = Candidate;
220
221   // If the shift constant is out of range, this code hasn't been simplified; bail out.
222 if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
223 return false;
224
225 // Fill in the mask bit derived from the shift constant.
226 MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
227 return MOps.Root == Candidate;
228}
229
230/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
231/// These will include a chain of 'or' or 'and'-shifted bits from a
232/// common source value:
233/// and (or (lshr X, C), ...), 1 --> (X & CMask) != 0
234/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
235/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
236/// that differ only with a final 'not' of the result. We expect that final
237/// 'not' to be folded with the compare that we create here (invert predicate).
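/// For example, an 'any-bits-set' chain over bits 0, 3 and 5 of %x:
///   %s3 = lshr i32 %x, 3
///   %o0 = or i32 %x, %s3
///   %s5 = lshr i32 %x, 5
///   %o1 = or i32 %o0, %s5
///   %r  = and i32 %o1, 1
/// is folded to roughly:
///   %m  = and i32 %x, 41        ; mask 0b101001 covering bits 0, 3 and 5
///   %c  = icmp ne i32 %m, 0
///   %r  = zext i1 %c to i32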
238 static bool foldAnyOrAllBitsSet(Instruction &I) {
239   // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
240   // "and X, 1" instruction must be the final op in the sequence.
241 bool MatchAllBitsSet;
242   if (match(&I, m_And(m_OneUse(m_And(m_Value(), m_Value())), m_One())))
243     MatchAllBitsSet = true;
244 else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
245 MatchAllBitsSet = false;
246 else
247 return false;
248
249 MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
250 if (MatchAllBitsSet) {
251 if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
252 return false;
253 } else {
254 if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
255 return false;
256 }
257
258 // The pattern was found. Create a masked compare that replaces all of the
259 // shift and logic ops.
260 IRBuilder<> Builder(&I);
261 Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
262 Value *And = Builder.CreateAnd(MOps.Root, Mask);
263 Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
264 : Builder.CreateIsNotNull(And);
265 Value *Zext = Builder.CreateZExt(Cmp, I.getType());
266 I.replaceAllUsesWith(Zext);
267 ++NumAnyOrAllBitsSet;
268 return true;
269}
270
271 // Try to recognize the function below as a popcount idiom and fold it to the
//    ctpop intrinsic.
272// This is the "best" algorithm from
273// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
274// Also used in TargetLowering::expandCTPOP().
275//
276// int popcount(unsigned int i) {
277// i = i - ((i >> 1) & 0x55555555);
278// i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
279// i = ((i + (i >> 4)) & 0x0F0F0F0F);
280// return (i * 0x01010101) >> 24;
281// }
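// Once the whole idiom is matched, the final lshr above is replaced with a
// single intrinsic call, e.g. for i32:
//   %res = call i32 @llvm.ctpop.i32(i32 %i)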
282 static bool tryToRecognizePopCount(Instruction &I) {
283   if (I.getOpcode() != Instruction::LShr)
284 return false;
285
286 Type *Ty = I.getType();
287 if (!Ty->isIntOrIntVectorTy())
288 return false;
289
290 unsigned Len = Ty->getScalarSizeInBits();
291 // FIXME: fix Len == 8 and other irregular type lengths.
292 if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
293 return false;
294
295 APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
296 APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
297 APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
298 APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
299 APInt MaskShift = APInt(Len, Len - 8);
300
301 Value *Op0 = I.getOperand(0);
302 Value *Op1 = I.getOperand(1);
303 Value *MulOp0;
304 // Matching "(i * 0x01010101...) >> 24".
305 if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
306 match(Op1, m_SpecificInt(MaskShift))) {
307 Value *ShiftOp0;
308 // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
309 if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
310 m_Deferred(ShiftOp0)),
311 m_SpecificInt(Mask0F)))) {
312 Value *AndOp0;
313 // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
314 if (match(ShiftOp0,
315 m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
316                       m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
317                             m_SpecificInt(Mask33))))) {
318 Value *Root, *SubOp1;
319 // Matching "i - ((i >> 1) & 0x55555555...)".
320 if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
321 match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
322 m_SpecificInt(Mask55)))) {
323 LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
324 IRBuilder<> Builder(&I);
325         Function *Func = Intrinsic::getDeclaration(
326             I.getModule(), Intrinsic::ctpop, I.getType());
327 I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
328 ++NumPopCountRecognized;
329 return true;
330 }
331 }
332 }
333 }
334
335 return false;
336}
337
338 /// Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), providing C1 and
339 /// C2 saturate the value of the fp conversion. The transform is not reversible
340 /// as the fptosi.sat is more defined than the input - all values produce a
341 /// valid value for the fptosi.sat, whereas some inputs that were out of range
342 /// of the integer conversion produce poison in the original. The reversed
343 /// pattern may use fmax and fmin instead. As we cannot directly reverse the
344 /// transform, and it is not always profitable, we make it conditional on the
345 /// cost being reported as lower by TTI.
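/// For example, clamping an fptosi result to [-128, 127]:
///   %conv = fptosi float %x to i32
///   %max  = call i32 @llvm.smax.i32(i32 %conv, i32 -128)
///   %res  = call i32 @llvm.smin.i32(i32 %max, i32 127)
/// may become, when TTI reports the saturating form as cheaper:
///   %sat  = call i8 @llvm.fptosi.sat.i8.f32(float %x)
///   %res  = sext i8 %sat to i32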
346 static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
347   // Look for smin(smax(fptosi x, C1), C2), converting to fptosi_sat.
348 Value *In;
349 const APInt *MinC, *MaxC;
350   if (!match(&I, m_SMax(m_OneUse(m_SMin(m_OneUse(m_FPToSI(m_Value(In))),
351                                         m_APInt(MinC))),
352                         m_APInt(MaxC))) &&
353       !match(&I, m_SMin(m_OneUse(m_SMax(m_OneUse(m_FPToSI(m_Value(In))),
354                                         m_APInt(MaxC))),
355                         m_APInt(MinC))))
356 return false;
357
358 // Check that the constants clamp a saturate.
359 if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1)
360 return false;
361
362 Type *IntTy = I.getType();
363 Type *FpTy = In->getType();
364 Type *SatTy =
365 IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1);
366 if (auto *VecTy = dyn_cast<VectorType>(IntTy))
367 SatTy = VectorType::get(SatTy, VecTy->getElementCount());
368
369 // Get the cost of the intrinsic, and check that against the cost of
370 // fptosi+smin+smax
371   InstructionCost SatCost = TTI.getIntrinsicInstrCost(
372       IntrinsicCostAttributes(Intrinsic::fptosi_sat, SatTy, {In}, {FpTy}),
373       TTI::TCK_RecipThroughput);
374   SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy,
375                                   TTI::CastContextHint::None,
376                                   TTI::TCK_RecipThroughput);
377
378   InstructionCost MinMaxCost = TTI.getCastInstrCost(
379       Instruction::FPToSI, IntTy, FpTy, TTI::CastContextHint::None,
380       TTI::TCK_RecipThroughput);
381   MinMaxCost += TTI.getIntrinsicInstrCost(
382       IntrinsicCostAttributes(Intrinsic::smin, IntTy, {IntTy}),
383       TTI::TCK_RecipThroughput);
384   MinMaxCost += TTI.getIntrinsicInstrCost(
385       IntrinsicCostAttributes(Intrinsic::smax, IntTy, {IntTy}),
386       TTI::TCK_RecipThroughput);
387
388 if (SatCost >= MinMaxCost)
389 return false;
390
391 IRBuilder<> Builder(&I);
392 Function *Fn = Intrinsic::getDeclaration(I.getModule(), Intrinsic::fptosi_sat,
393 {SatTy, FpTy});
394 Value *Sat = Builder.CreateCall(Fn, In);
395 I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
396 return true;
397}
398
399/// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids
400/// pessimistic codegen that has to account for setting errno and can enable
401/// vectorization.
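/// For example, a libcall such as:
///   %r = call nnan float @sqrtf(float %x)
/// can be rewritten, assuming the target has a fast sqrt instruction, as:
///   %r = call nnan float @llvm.sqrt.f32(float %x)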
402 static bool foldSqrt(Instruction &I, TargetTransformInfo &TTI,
403                      TargetLibraryInfo &TLI, AssumptionCache &AC,
404                      DominatorTree &DT) {
405 // Match a call to sqrt mathlib function.
406 auto *Call = dyn_cast<CallInst>(&I);
407 if (!Call)
408 return false;
409
410 Module *M = Call->getModule();
411 LibFunc Func;
412 if (!TLI.getLibFunc(*Call, Func) || !isLibFuncEmittable(M, &TLI, Func))
413 return false;
414
415 if (Func != LibFunc_sqrt && Func != LibFunc_sqrtf && Func != LibFunc_sqrtl)
416 return false;
417
418 // If (1) this is a sqrt libcall, (2) we can assume that NAN is not created
419 // (because NNAN or the operand arg must not be less than -0.0) and (3) we
420 // would not end up lowering to a libcall anyway (which could change the value
421 // of errno), then:
422 // (1) errno won't be set.
423 // (2) it is safe to convert this to an intrinsic call.
424 Type *Ty = Call->getType();
425 Value *Arg = Call->getArgOperand(0);
426 if (TTI.haveFastSqrt(Ty) &&
427 (Call->hasNoNaNs() ||
428        cannotBeOrderedLessThanZero(
429            Arg, 0, SimplifyQuery(M->getDataLayout(), &TLI, &DT, &AC, &I)))) {
430 IRBuilder<> Builder(&I);
431     IRBuilderBase::FastMathFlagGuard Guard(Builder);
432     Builder.setFastMathFlags(Call->getFastMathFlags());
433
434 Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, Ty);
435 Value *NewSqrt = Builder.CreateCall(Sqrt, Arg, "sqrt");
436 I.replaceAllUsesWith(NewSqrt);
437
438 // Explicitly erase the old call because a call with side effects is not
439 // trivially dead.
440 I.eraseFromParent();
441 return true;
442 }
443
444 return false;
445}
446
447// Check if this array of constants represents a cttz table.
448// Iterate over the elements from \p Table by trying to find/match all
449// the numbers from 0 to \p InputBits that should represent cttz results.
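// In other words, for every result N in [0, InputBits) the table must satisfy
// Table[((Mul << N) mod 2^InputBits) >> Shift] == N, because an input x with
// cttz(x) == N has (x & -x) == 1 << N and is therefore looked up at that index.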
450static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
451 uint64_t Shift, uint64_t InputBits) {
452 unsigned Length = Table.getNumElements();
453 if (Length < InputBits || Length > InputBits * 2)
454 return false;
455
456 APInt Mask = APInt::getBitsSetFrom(InputBits, Shift);
457 unsigned Matched = 0;
458
459 for (unsigned i = 0; i < Length; i++) {
460 uint64_t Element = Table.getElementAsInteger(i);
461 if (Element >= InputBits)
462 continue;
463
464 // Check if \p Element matches a concrete answer. It could fail for some
465 // elements that are never accessed, so we keep iterating over each element
466 // from the table. The number of matched elements should be equal to the
467 // number of potential right answers which is \p InputBits actually.
468 if ((((Mul << Element) & Mask.getZExtValue()) >> Shift) == i)
469 Matched++;
470 }
471
472 return Matched == InputBits;
473}
474
475// Try to recognize table-based ctz implementation.
476// E.g., an example in C (for more cases please see the llvm/tests):
477// int f(unsigned x) {
478// static const char table[32] =
479// {0, 1, 28, 2, 29, 14, 24, 3, 30,
480// 22, 20, 15, 25, 17, 4, 8, 31, 27,
481// 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
482// return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27];
483// }
484// this can be lowered to `cttz` instruction.
485// There is also a special case when the element is 0.
486//
487 // Here are some examples of LLVM IR for a 64-bit target:
488//
489// CASE 1:
490// %sub = sub i32 0, %x
491// %and = and i32 %sub, %x
492// %mul = mul i32 %and, 125613361
493// %shr = lshr i32 %mul, 27
494// %idxprom = zext i32 %shr to i64
495// %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0,
496// i64 %idxprom
497// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
498//
499// CASE 2:
500// %sub = sub i32 0, %x
501// %and = and i32 %sub, %x
502// %mul = mul i32 %and, 72416175
503// %shr = lshr i32 %mul, 26
504// %idxprom = zext i32 %shr to i64
505// %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table,
506// i64 0, i64 %idxprom
507// %0 = load i16, i16* %arrayidx, align 2, !tbaa !8
508//
509// CASE 3:
510// %sub = sub i32 0, %x
511// %and = and i32 %sub, %x
512// %mul = mul i32 %and, 81224991
513// %shr = lshr i32 %mul, 27
514// %idxprom = zext i32 %shr to i64
515// %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table,
516// i64 0, i64 %idxprom
517// %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
518//
519// CASE 4:
520// %sub = sub i64 0, %x
521// %and = and i64 %sub, %x
522// %mul = mul i64 %and, 283881067100198605
523// %shr = lshr i64 %mul, 58
524// %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @table, i64 0,
525// i64 %shr
526// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
527//
528// All this can be lowered to @llvm.cttz.i32/64 intrinsic.
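// For CASE 1 above, where table[0] == 0 (so a zero input is not mapped to the
// "defined" result 32), the load is replaced with roughly:
//   %ctz = call i32 @llvm.cttz.i32(i32 %x, i1 true)
//   %is0 = icmp eq i32 %x, 0
//   %sel = select i1 %is0, i32 0, i32 %ctz
// followed by a zext/trunc to the loaded type.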
529 static bool tryToRecognizeTableBasedCttz(Instruction &I) {
530   LoadInst *LI = dyn_cast<LoadInst>(&I);
531 if (!LI)
532 return false;
533
534 Type *AccessType = LI->getType();
535 if (!AccessType->isIntegerTy())
536 return false;
537
538 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
539 if (!GEP || !GEP->isInBounds() || GEP->getNumIndices() != 2)
540 return false;
541
542 if (!GEP->getSourceElementType()->isArrayTy())
543 return false;
544
545 uint64_t ArraySize = GEP->getSourceElementType()->getArrayNumElements();
546 if (ArraySize != 32 && ArraySize != 64)
547 return false;
548
549 GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
550 if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
551 return false;
552
553 ConstantDataArray *ConstData =
554 dyn_cast<ConstantDataArray>(GVTable->getInitializer());
555 if (!ConstData)
556 return false;
557
558 if (!match(GEP->idx_begin()->get(), m_ZeroInt()))
559 return false;
560
561 Value *Idx2 = std::next(GEP->idx_begin())->get();
562 Value *X1;
563 uint64_t MulConst, ShiftConst;
564 // FIXME: 64-bit targets have `i64` type for the GEP index, so this match will
565 // probably fail for other (e.g. 32-bit) targets.
566 if (!match(Idx2, m_ZExtOrSelf(
567                        m_LShr(m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)),
568                                     m_ConstantInt(MulConst)),
569 m_ConstantInt(ShiftConst)))))
570 return false;
571
572 unsigned InputBits = X1->getType()->getScalarSizeInBits();
573 if (InputBits != 32 && InputBits != 64)
574 return false;
575
576 // Shift should extract top 5..7 bits.
577 if (InputBits - Log2_32(InputBits) != ShiftConst &&
578 InputBits - Log2_32(InputBits) - 1 != ShiftConst)
579 return false;
580
581 if (!isCTTZTable(*ConstData, MulConst, ShiftConst, InputBits))
582 return false;
583
584 auto ZeroTableElem = ConstData->getElementAsInteger(0);
585 bool DefinedForZero = ZeroTableElem == InputBits;
586
587 IRBuilder<> B(LI);
588 ConstantInt *BoolConst = B.getInt1(!DefinedForZero);
589 Type *XType = X1->getType();
590 auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst});
591 Value *ZExtOrTrunc = nullptr;
592
593 if (DefinedForZero) {
594 ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, AccessType);
595 } else {
596 // If the value in elem 0 isn't the same as InputBits, we still want to
597 // produce the value from the table.
598 auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));
599 auto Select =
600 B.CreateSelect(Cmp, ConstantInt::get(XType, ZeroTableElem), Cttz);
601
602 // NOTE: If the table[0] is 0, but the cttz(0) is defined by the Target
603 // it should be handled as: `cttz(x) & (typeSize - 1)`.
604
605 ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);
606 }
607
608 LI->replaceAllUsesWith(ZExtOrTrunc);
609
610 return true;
611}
612
613/// This is used by foldLoadsRecursive() to capture a Root Load node which is
614/// of type or(load, load) and recursively build the wide load. Also capture the
615/// shift amount, zero extend type and loadSize.
616struct LoadOps {
617   LoadInst *Root = nullptr;
618   LoadInst *RootInsert = nullptr;
619   bool FoundRoot = false;
620   uint64_t LoadSize = 0;
621   const APInt *Shift = nullptr;
622   Type *ZextType;
623   AAMDNodes AATags;
624 };
625
626 // Identify and merge consecutive loads recursively, matching patterns of the form
627// (ZExt(L1) << shift1) | (ZExt(L2) << shift2) -> ZExt(L3) << shift1
628// (ZExt(L1) << shift1) | ZExt(L2) -> ZExt(L3)
629static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL,
630 AliasAnalysis &AA) {
631 const APInt *ShAmt2 = nullptr;
632 Value *X;
633 Instruction *L1, *L2;
634
635 // Go to the last node with loads.
636 if (match(V, m_OneUse(m_c_Or(
637 m_Value(X),
638                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Instruction(L2)))),
639                                   m_APInt(ShAmt2)))))) ||
640       match(V, m_OneUse(m_c_Or(m_Value(X),
641                                m_OneUse(m_ZExt(m_OneUse(m_Instruction(L2)))))))) {
642     if (!foldLoadsRecursive(X, LOps, DL, AA) && LOps.FoundRoot)
643 // Avoid Partial chain merge.
644 return false;
645 } else
646 return false;
647
648 // Check if the pattern has loads
649 LoadInst *LI1 = LOps.Root;
650 const APInt *ShAmt1 = LOps.Shift;
651 if (LOps.FoundRoot == false &&
652       (match(X, m_OneUse(m_ZExt(m_Instruction(L1)))) ||
653        match(X, m_OneUse(m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Instruction(L1)))),
654                                m_APInt(ShAmt1)))))) {
655 LI1 = dyn_cast<LoadInst>(L1);
656 }
657 LoadInst *LI2 = dyn_cast<LoadInst>(L2);
658
659   // Check that the loads are distinct, simple, and in the same address space.
660   if (LI1 == LI2 || !LI1 || !LI2 || !LI1->isSimple() || !LI2->isSimple() ||
661       LI1->getPointerAddressSpace() != LI2->getPointerAddressSpace())
662     return false;
663
664 // Check if Loads come from same BB.
665 if (LI1->getParent() != LI2->getParent())
666 return false;
667
668 // Find the data layout
669 bool IsBigEndian = DL.isBigEndian();
670
671 // Check if loads are consecutive and same size.
672 Value *Load1Ptr = LI1->getPointerOperand();
673 APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
674 Load1Ptr =
675 Load1Ptr->stripAndAccumulateConstantOffsets(DL, Offset1,
676 /* AllowNonInbounds */ true);
677
678 Value *Load2Ptr = LI2->getPointerOperand();
679 APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);
680 Load2Ptr =
681 Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2,
682 /* AllowNonInbounds */ true);
683
684   // Verify that both loads have the same base pointer and the same load size.
685 uint64_t LoadSize1 = LI1->getType()->getPrimitiveSizeInBits();
686 uint64_t LoadSize2 = LI2->getType()->getPrimitiveSizeInBits();
687 if (Load1Ptr != Load2Ptr || LoadSize1 != LoadSize2)
688 return false;
689
690   // Only support load sizes of at least 8 bits that are a power of 2.
691 if (LoadSize1 < 8 || !isPowerOf2_64(LoadSize1))
692 return false;
693
694   // Use alias analysis to check for stores between the loads.
695 LoadInst *Start = LOps.FoundRoot ? LOps.RootInsert : LI1, *End = LI2;
696 MemoryLocation Loc;
697 if (!Start->comesBefore(End)) {
698 std::swap(Start, End);
699     Loc = MemoryLocation::get(End);
700     if (LOps.FoundRoot)
701 Loc = Loc.getWithNewSize(LOps.LoadSize);
702 } else
703     Loc = MemoryLocation::get(End);
704   unsigned NumScanned = 0;
705 for (Instruction &Inst :
706 make_range(Start->getIterator(), End->getIterator())) {
707 if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc)))
708 return false;
709
710 // Ignore debug info so that's not counted against MaxInstrsToScan.
711 // Otherwise debug info could affect codegen.
712 if (!isa<DbgInfoIntrinsic>(Inst) && ++NumScanned > MaxInstrsToScan)
713 return false;
714 }
715
716 // Make sure Load with lower Offset is at LI1
717 bool Reverse = false;
718 if (Offset2.slt(Offset1)) {
719 std::swap(LI1, LI2);
720 std::swap(ShAmt1, ShAmt2);
721 std::swap(Offset1, Offset2);
722 std::swap(Load1Ptr, Load2Ptr);
723 std::swap(LoadSize1, LoadSize2);
724 Reverse = true;
725 }
726
727 // Big endian swap the shifts
728 if (IsBigEndian)
729 std::swap(ShAmt1, ShAmt2);
730
731 // Find Shifts values.
732 uint64_t Shift1 = 0, Shift2 = 0;
733 if (ShAmt1)
734 Shift1 = ShAmt1->getZExtValue();
735 if (ShAmt2)
736 Shift2 = ShAmt2->getZExtValue();
737
738 // First load is always LI1. This is where we put the new load.
739 // Use the merged load size available from LI1 for forward loads.
740 if (LOps.FoundRoot) {
741 if (!Reverse)
742 LoadSize1 = LOps.LoadSize;
743 else
744 LoadSize2 = LOps.LoadSize;
745 }
746
747   // Verify that the shift amounts line up with the load sizes and that the
748   // loads are consecutive.
749 uint64_t ShiftDiff = IsBigEndian ? LoadSize2 : LoadSize1;
750 uint64_t PrevSize =
751 DL.getTypeStoreSize(IntegerType::get(LI1->getContext(), LoadSize1));
752 if ((Shift2 - Shift1) != ShiftDiff || (Offset2 - Offset1) != PrevSize)
753 return false;
754
755 // Update LOps
756 AAMDNodes AATags1 = LOps.AATags;
757 AAMDNodes AATags2 = LI2->getAAMetadata();
758 if (LOps.FoundRoot == false) {
759 LOps.FoundRoot = true;
760 AATags1 = LI1->getAAMetadata();
761 }
762 LOps.LoadSize = LoadSize1 + LoadSize2;
763 LOps.RootInsert = Start;
764
765 // Concatenate the AATags of the Merged Loads.
766 LOps.AATags = AATags1.concat(AATags2);
767
768 LOps.Root = LI1;
769 LOps.Shift = ShAmt1;
770 LOps.ZextType = X->getType();
771 return true;
772}
773
774// For a given BB instruction, evaluate all loads in the chain that form a
775// pattern which suggests that the loads can be combined. The one and only use
776// of the loads is to form a wider load.
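// For example, on a little-endian target two adjacent i8 loads combined as:
//   %p1 = getelementptr inbounds i8, ptr %p, i64 1
//   %l0 = load i8, ptr %p
//   %l1 = load i8, ptr %p1
//   %z0 = zext i8 %l0 to i16
//   %z1 = zext i8 %l1 to i16
//   %sh = shl i16 %z1, 8
//   %or = or i16 %z0, %sh
// can be replaced, subject to the TTI legality and alignment checks below, by:
//   %or = load i16, ptr %p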
777 static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL,
778                                  TargetTransformInfo &TTI, AliasAnalysis &AA,
779                                  const DominatorTree &DT) {
780 // Only consider load chains of scalar values.
781 if (isa<VectorType>(I.getType()))
782 return false;
783
784 LoadOps LOps;
785 if (!foldLoadsRecursive(&I, LOps, DL, AA) || !LOps.FoundRoot)
786 return false;
787
788 IRBuilder<> Builder(&I);
789 LoadInst *NewLoad = nullptr, *LI1 = LOps.Root;
790
791 IntegerType *WiderType = IntegerType::get(I.getContext(), LOps.LoadSize);
792 // TTI based checks if we want to proceed with wider load
793 bool Allowed = TTI.isTypeLegal(WiderType);
794 if (!Allowed)
795 return false;
796
797 unsigned AS = LI1->getPointerAddressSpace();
798 unsigned Fast = 0;
799 Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize,
800 AS, LI1->getAlign(), &Fast);
801 if (!Allowed || !Fast)
802 return false;
803
804 // Get the Index and Ptr for the new GEP.
805 Value *Load1Ptr = LI1->getPointerOperand();
806 Builder.SetInsertPoint(LOps.RootInsert);
807 if (!DT.dominates(Load1Ptr, LOps.RootInsert)) {
808 APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
809 Load1Ptr = Load1Ptr->stripAndAccumulateConstantOffsets(
810 DL, Offset1, /* AllowNonInbounds */ true);
811 Load1Ptr = Builder.CreatePtrAdd(Load1Ptr,
812 Builder.getInt32(Offset1.getZExtValue()));
813 }
814 // Generate wider load.
815 NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(),
816 LI1->isVolatile(), "");
817 NewLoad->takeName(LI1);
818 // Set the New Load AATags Metadata.
819 if (LOps.AATags)
820 NewLoad->setAAMetadata(LOps.AATags);
821
822 Value *NewOp = NewLoad;
823 // Check if zero extend needed.
824 if (LOps.ZextType)
825 NewOp = Builder.CreateZExt(NewOp, LOps.ZextType);
826
827 // Check if shift needed. We need to shift with the amount of load1
828 // shift if not zero.
829 if (LOps.Shift)
830 NewOp = Builder.CreateShl(NewOp, ConstantInt::get(I.getContext(), *LOps.Shift));
831 I.replaceAllUsesWith(NewOp);
832
833 return true;
834}
835
836// Calculate GEP Stride and accumulated const ModOffset. Return Stride and
837// ModOffset
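// For example, if the GEPs feeding the pointer scale their variable indices by
// 8 and by 12 bytes over the same global, the minimum stride is GCD(8, 12) = 4,
// and an accumulated constant offset of 6 leaves ModOffset = 6 % 4 = 2.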
838static std::pair<APInt, APInt>
839 getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL) {
840   unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
841 std::optional<APInt> Stride;
842 APInt ModOffset(BW, 0);
843   // Return a minimum GEP stride, the greatest common divisor of consecutive
844   // GEP index scales (cf. Bézout's identity).
845 while (auto *GEP = dyn_cast<GEPOperator>(PtrOp)) {
846 MapVector<Value *, APInt> VarOffsets;
847 if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset))
848 break;
849
850 for (auto [V, Scale] : VarOffsets) {
851 // Only keep a power of two factor for non-inbounds
852 if (!GEP->isInBounds())
853 Scale = APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
854
855 if (!Stride)
856 Stride = Scale;
857 else
858 Stride = APIntOps::GreatestCommonDivisor(*Stride, Scale);
859 }
860
861 PtrOp = GEP->getPointerOperand();
862 }
863
864 // Check whether pointer arrives back at Global Variable via at least one GEP.
865 // Even if it doesn't, we can check by alignment.
866 if (!isa<GlobalVariable>(PtrOp) || !Stride)
867 return {APInt(BW, 1), APInt(BW, 0)};
868
869   // Taking signed GEP indices into account, reduce the accumulated offset to a
870   // non-negative remainder of division by the minimum GEP stride.
871 ModOffset = ModOffset.srem(*Stride);
872 if (ModOffset.isNegative())
873 ModOffset += *Stride;
874
875 return {*Stride, ModOffset};
876}
877
878 /// If C is a constant patterned array and all valid loaded results for the
879 /// given alignment are equal to the same constant, return that constant.
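/// For example, given
///   @g = internal constant [4 x i16] [i16 7, i16 7, i16 7, i16 7], align 2
/// an align-2 i16 load from @g yields 7 at every valid offset, so the load can
/// be replaced by the constant i16 7 even when the GEP index is unknown.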
880 static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) {
881   auto *LI = dyn_cast<LoadInst>(&I);
882 if (!LI || LI->isVolatile())
883 return false;
884
885 // We can only fold the load if it is from a constant global with definitive
886 // initializer. Skip expensive logic if this is not the case.
887 auto *PtrOp = LI->getPointerOperand();
888 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
889 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
890 return false;
891
892 // Bail for large initializers in excess of 4K to avoid too many scans.
893 Constant *C = GV->getInitializer();
894 uint64_t GVSize = DL.getTypeAllocSize(C->getType());
895 if (!GVSize || 4096 < GVSize)
896 return false;
897
898 Type *LoadTy = LI->getType();
899 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
900 auto [Stride, ConstOffset] = getStrideAndModOffsetOfGEP(PtrOp, DL);
901
902   // Any possible offset is a multiple of the GEP stride, and any valid offset
903   // is a multiple of the load alignment, so checking only multiples of the
904   // larger of the two is sufficient to establish that the results are equal.
905 if (auto LA = LI->getAlign();
906 LA <= GV->getAlign().valueOrOne() && Stride.getZExtValue() < LA.value()) {
907 ConstOffset = APInt(BW, 0);
908 Stride = APInt(BW, LA.value());
909 }
910
911 Constant *Ca = ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL);
912 if (!Ca)
913 return false;
914
915 unsigned E = GVSize - DL.getTypeStoreSize(LoadTy);
916 for (; ConstOffset.getZExtValue() <= E; ConstOffset += Stride)
917 if (Ca != ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL))
918 return false;
919
920 I.replaceAllUsesWith(Ca);
921
922 return true;
923}
924
925/// This is the entry point for folds that could be implemented in regular
926/// InstCombine, but they are separated because they are not expected to
927/// occur frequently and/or have more than a constant-length pattern match.
928 static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
929                                 TargetTransformInfo &TTI, TargetLibraryInfo &TLI,
930                                 AliasAnalysis &AA, AssumptionCache &AC) {
932 bool MadeChange = false;
933 for (BasicBlock &BB : F) {
934 // Ignore unreachable basic blocks.
935 if (!DT.isReachableFromEntry(&BB))
936 continue;
937
938 const DataLayout &DL = F.getParent()->getDataLayout();
939
940 // Walk the block backwards for efficiency. We're matching a chain of
941 // use->defs, so we're more likely to succeed by starting from the bottom.
942 // Also, we want to avoid matching partial patterns.
943 // TODO: It would be more efficient if we removed dead instructions
944 // iteratively in this loop rather than waiting until the end.
945     for (Instruction &I : make_early_inc_range(llvm::reverse(BB))) {
946       MadeChange |= foldAnyOrAllBitsSet(I);
947 MadeChange |= foldGuardedFunnelShift(I, DT);
948 MadeChange |= tryToRecognizePopCount(I);
949 MadeChange |= tryToFPToSat(I, TTI);
950 MadeChange |= tryToRecognizeTableBasedCttz(I);
951 MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT);
952 MadeChange |= foldPatternedLoads(I, DL);
953       // NOTE: This function may erase the instruction `I`, so it needs to be
954       // called at the end of this sequence; otherwise later folds could touch
955       // a dangling instruction.
956 MadeChange |= foldSqrt(I, TTI, TLI, AC, DT);
957 }
958 }
959
960 // We're done with transforms, so remove dead instructions.
961 if (MadeChange)
962 for (BasicBlock &BB : F)
963       SimplifyInstructionsInBlock(&BB, &TLI);
964
965 return MadeChange;
966}
967
968/// This is the entry point for all transforms. Pass manager differences are
969/// handled in the callers of this function.
970 static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI,
971                     TargetLibraryInfo &TLI, DominatorTree &DT,
972                     AliasAnalysis &AA) {
973 bool MadeChange = false;
974 const DataLayout &DL = F.getParent()->getDataLayout();
975 TruncInstCombine TIC(AC, TLI, DL, DT);
976 MadeChange |= TIC.run(F);
977 MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI, AA, AC);
978 return MadeChange;
979}
980
981 PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
982                                                  FunctionAnalysisManager &AM) {
983   auto &AC = AM.getResult<AssumptionAnalysis>(F);
984 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
985 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
986 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
987 auto &AA = AM.getResult<AAManager>(F);
988 if (!runImpl(F, AC, TTI, TLI, DT, AA)) {
989 // No changes, all analyses are preserved.
990 return PreservedAnalyses::all();
991 }
992 // Mark all the analyses that instcombine updates as preserved.
993   PreservedAnalyses PA;
994   PA.preserveSet<CFGAnalyses>();
995   return PA;
996}