ValueTracking.cpp
1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/ValueTracking.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/ScopeExit.h"
23#include "llvm/ADT/StringRef.h"
33#include "llvm/Analysis/Loads.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/Constant.h"
44#include "llvm/IR/Constants.h"
47#include "llvm/IR/Dominators.h"
49#include "llvm/IR/Function.h"
51#include "llvm/IR/GlobalAlias.h"
52#include "llvm/IR/GlobalValue.h"
54#include "llvm/IR/InstrTypes.h"
55#include "llvm/IR/Instruction.h"
58#include "llvm/IR/Intrinsics.h"
59#include "llvm/IR/IntrinsicsAArch64.h"
60#include "llvm/IR/IntrinsicsAMDGPU.h"
61#include "llvm/IR/IntrinsicsRISCV.h"
62#include "llvm/IR/IntrinsicsX86.h"
63#include "llvm/IR/LLVMContext.h"
64#include "llvm/IR/Metadata.h"
65#include "llvm/IR/Module.h"
66#include "llvm/IR/Operator.h"
68#include "llvm/IR/Type.h"
69#include "llvm/IR/User.h"
70#include "llvm/IR/Value.h"
79#include <algorithm>
80#include <cassert>
81#include <cstdint>
82#include <optional>
83#include <utility>
84
85using namespace llvm;
86using namespace llvm::PatternMatch;
87
88// Controls the number of uses of the value searched for possible
89// dominating comparisons.
90static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
91 cl::Hidden, cl::init(20));
92
93/// Maximum number of instructions to check between assume and context
94/// instruction.
95static constexpr unsigned MaxInstrsToCheckForFree = 16;
96
97/// Returns the bitwidth of the given scalar or pointer type. For vector types,
98/// returns the element type's bitwidth.
99static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
100 if (unsigned BitWidth = Ty->getScalarSizeInBits())
101 return BitWidth;
102
103 return DL.getPointerTypeSizeInBits(Ty);
104}
105
106// Given the provided Value and, potentially, a context instruction, return
107// the preferred context instruction (if any).
108static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
109 // If we've been provided with a context instruction, then use that (provided
110 // it has been inserted).
111 if (CxtI && CxtI->getParent())
112 return CxtI;
113
114 // If the value is really an already-inserted instruction, then use that.
115 CxtI = dyn_cast<Instruction>(V);
116 if (CxtI && CxtI->getParent())
117 return CxtI;
118
119 return nullptr;
120}
121
122static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
123 const APInt &DemandedElts,
124 APInt &DemandedLHS, APInt &DemandedRHS) {
125 if (isa<ScalableVectorType>(Shuf->getType())) {
126 assert(DemandedElts == APInt(1,1));
127 DemandedLHS = DemandedRHS = DemandedElts;
128 return true;
129 }
130
131 int NumElts =
132 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
133 return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
134 DemandedElts, DemandedLHS, DemandedRHS);
135}
136
137static void computeKnownBits(const Value *V, const APInt &DemandedElts,
138 KnownBits &Known, const SimplifyQuery &Q,
139 unsigned Depth);
140
141void llvm::computeKnownBits(const Value *V, KnownBits &Known,
142 const SimplifyQuery &Q, unsigned Depth) {
143 // Since the number of lanes in a scalable vector is unknown at compile time,
144 // we track one bit which is implicitly broadcast to all lanes. This means
145 // that all lanes in a scalable vector are considered demanded.
146 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
147 APInt DemandedElts =
148 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
149 ::computeKnownBits(V, DemandedElts, Known, Q, Depth);
150}
151
152void llvm::computeKnownBits(const Value *V, KnownBits &Known,
153 const DataLayout &DL, AssumptionCache *AC,
154 const Instruction *CxtI, const DominatorTree *DT,
155 bool UseInstrInfo, unsigned Depth) {
156 computeKnownBits(V, Known,
157 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo),
158 Depth);
159}
160
161KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
162 AssumptionCache *AC, const Instruction *CxtI,
163 const DominatorTree *DT, bool UseInstrInfo,
164 unsigned Depth) {
165 return computeKnownBits(
166 V, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth);
167}
168
169KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
170 const DataLayout &DL, AssumptionCache *AC,
171 const Instruction *CxtI,
172 const DominatorTree *DT, bool UseInstrInfo,
173 unsigned Depth) {
174 return computeKnownBits(
175 V, DemandedElts,
176 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth);
177}
178
179static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS,
180 const SimplifyQuery &SQ) {
181 // Look for an inverted mask: (X & ~M) op (Y & M).
182 {
183 Value *M;
184 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
185 match(RHS, m_c_And(m_Specific(M), m_Value())) &&
186 isGuaranteedNotToBeUndef(M, SQ.AC, SQ.CxtI, SQ.DT))
187 return true;
188 }
189
190 // X op (Y & ~X)
191 if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) &&
192 isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
193 return true;
194
195 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
196 // for constant Y.
197 Value *Y;
198 if (match(RHS,
199 m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) &&
200 isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT) &&
201 isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
202 return true;
203
204 // Peek through extends to find a 'not' of the other side:
205 // (ext Y) op ext(~Y)
206 if (match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
207 match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y)))) &&
208 isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
209 return true;
210
211 // Look for: (A & B) op ~(A | B)
212 {
213 Value *A, *B;
214 if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
215 match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))) &&
216 isGuaranteedNotToBeUndef(A, SQ.AC, SQ.CxtI, SQ.DT) &&
217 isGuaranteedNotToBeUndef(B, SQ.AC, SQ.CxtI, SQ.DT))
218 return true;
219 }
220
221 // Look for: (X << V) op (Y >> (BitWidth - V))
222 // or (X >> V) op (Y << (BitWidth - V))
223 {
224 const Value *V;
225 const APInt *R;
226 if (((match(RHS, m_Shl(m_Value(), m_Sub(m_APInt(R), m_Value(V)))) &&
227 match(LHS, m_LShr(m_Value(), m_Specific(V)))) ||
228 (match(RHS, m_LShr(m_Value(), m_Sub(m_APInt(R), m_Value(V)))) &&
229 match(LHS, m_Shl(m_Value(), m_Specific(V))))) &&
230 R->uge(LHS->getType()->getScalarSizeInBits()))
231 return true;
232 }
233
234 return false;
235}
236
237bool llvm::haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
238 const WithCache<const Value *> &RHSCache,
239 const SimplifyQuery &SQ) {
240 const Value *LHS = LHSCache.getValue();
241 const Value *RHS = RHSCache.getValue();
242
243 assert(LHS->getType() == RHS->getType() &&
244 "LHS and RHS should have the same type");
245 assert(LHS->getType()->isIntOrIntVectorTy() &&
246 "LHS and RHS should be integers");
247
248 if (haveNoCommonBitsSetSpecialCases(LHS, RHS, SQ) ||
249 haveNoCommonBitsSetSpecialCases(RHS, LHS, SQ))
250 return true;
251
252 return KnownBits::haveNoCommonBitsSet(LHSCache.getKnownBits(SQ),
253 RHSCache.getKnownBits(SQ));
254}
255
256bool llvm::isOnlyUsedInZeroComparison(const Instruction *I) {
257 return !I->user_empty() &&
258 all_of(I->users(), match_fn(m_ICmp(m_Value(), m_Zero())));
259}
260
261bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
262 return !I->user_empty() && all_of(I->users(), [](const User *U) {
263 CmpPredicate P;
264 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
265 });
266}
267
268bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
269 bool OrZero, AssumptionCache *AC,
270 const Instruction *CxtI,
271 const DominatorTree *DT, bool UseInstrInfo,
272 unsigned Depth) {
273 return ::isKnownToBeAPowerOfTwo(
274 V, OrZero, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo),
275 Depth);
276}
277
278static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
279 const SimplifyQuery &Q, unsigned Depth);
280
281bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
282 unsigned Depth) {
283 return computeKnownBits(V, SQ, Depth).isNonNegative();
284}
285
286bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
287 unsigned Depth) {
288 if (auto *CI = dyn_cast<ConstantInt>(V))
289 return CI->getValue().isStrictlyPositive();
290
291 // If `isKnownNonNegative` ever becomes more sophisticated, make sure to keep
292 // this updated.
293 KnownBits Known = computeKnownBits(V, SQ, Depth);
294 return Known.isNonNegative() &&
295 (Known.isNonZero() || isKnownNonZero(V, SQ, Depth));
296}
297
298bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ,
299 unsigned Depth) {
300 return computeKnownBits(V, SQ, Depth).isNegative();
301}
302
303static bool isKnownNonEqual(const Value *V1, const Value *V2,
304 const APInt &DemandedElts, const SimplifyQuery &Q,
305 unsigned Depth);
306
307bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
308 const SimplifyQuery &Q, unsigned Depth) {
309 // We don't support looking through casts.
310 if (V1 == V2 || V1->getType() != V2->getType())
311 return false;
312 auto *FVTy = dyn_cast<FixedVectorType>(V1->getType());
313 APInt DemandedElts =
314 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
315 return ::isKnownNonEqual(V1, V2, DemandedElts, Q, Depth);
316}
317
318bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
319 const SimplifyQuery &SQ, unsigned Depth) {
320 KnownBits Known(Mask.getBitWidth());
321 computeKnownBits(V, Known, SQ, Depth);
322 return Mask.isSubsetOf(Known.Zero);
323}
324
325static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
326 const SimplifyQuery &Q, unsigned Depth);
327
328static unsigned ComputeNumSignBits(const Value *V, const SimplifyQuery &Q,
329 unsigned Depth = 0) {
330 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
331 APInt DemandedElts =
332 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
333 return ComputeNumSignBits(V, DemandedElts, Q, Depth);
334}
335
336unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
337 AssumptionCache *AC, const Instruction *CxtI,
338 const DominatorTree *DT, bool UseInstrInfo,
339 unsigned Depth) {
340 return ::ComputeNumSignBits(
341 V, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo), Depth);
342}
343
344unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
345 AssumptionCache *AC,
346 const Instruction *CxtI,
347 const DominatorTree *DT,
348 unsigned Depth) {
349 unsigned SignBits = ComputeNumSignBits(V, DL, AC, CxtI, DT, Depth);
350 return V->getType()->getScalarSizeInBits() - SignBits + 1;
351}
352
353/// Try to detect the lerp pattern: a * (b - c) + c * d
354/// where a >= 0, b >= 0, c >= 0, d >= 0, and b >= c.
355///
356/// In that particular case, we can use the following chain of reasoning:
357///
358/// a * (b - c) + c * d <= a' * (b - c) + a' * c = a' * b where a' = max(a, d)
359///
360/// Since that is true for arbitrary a, b, c and d within our constraints, we
361/// can conclude that:
362///
363/// max(a * (b - c) + c * d) <= max(max(a), max(d)) * max(b) = U
364///
365/// Considering that any result of the lerp is less than or equal to U, it
366/// has at least as many leading zero bits as U.
367///
368/// While being quite a specific situation, it is fairly common in computer
369/// graphics in the shape of alpha blending.
370///
371/// Modifies given KnownOut in-place with the inferred information.
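/// For example (values chosen only for illustration): if a, b and d are each
/// known to be at most 255 and b >= c, then U <= 255 * 255 < 2^16, so any
/// i32 result of the pattern has at least 16 leading zero bits.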
372static void computeKnownBitsFromLerpPattern(const Value *Op0, const Value *Op1,
373 const APInt &DemandedElts,
374 KnownBits &KnownOut,
375 const SimplifyQuery &Q,
376 unsigned Depth) {
377
378 Type *Ty = Op0->getType();
379 const unsigned BitWidth = Ty->getScalarSizeInBits();
380
381 // Only handle scalar types for now
382 if (Ty->isVectorTy())
383 return;
384
385 // Try to match: a * (b - c) + c * d.
386 // When a == 1 => A == nullptr, the same applies to d/D as well.
387 const Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
388 const Instruction *SubBC = nullptr;
389
390 const auto MatchSubBC = [&]() {
391 // (b - c) can have two forms that interest us:
392 //
393 // 1. sub nuw %b, %c
394 // 2. xor %c, %b
395 //
396 // For the first case, nuw flag guarantees our requirement b >= c.
397 //
398 // The second case might happen when the analysis can infer that b is a mask
399 // for c and we can transform sub operation into xor (that is usually true
400 // for constant b's). Even though xor is symmetrical, canonicalization
401 // ensures that the constant will be the RHS. We have additional checks
402 // later on to ensure that this xor operation is equivalent to subtraction.
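// As an illustration, with b = 0b1111 and c = 0b0101 the subtraction borrows
// nothing, so b - c == 0b1010 == b ^ c; the later mask check enforces exactly
// this no-borrow condition.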
404 m_Xor(m_Value(C), m_Value(B))));
405 };
406
407 const auto MatchASubBC = [&]() {
408 // Cases:
409 // - a * (b - c)
410 // - (b - c) * a
411 // - (b - c) <- a implicitly equals 1
412 return m_CombineOr(m_c_Mul(m_Value(A), MatchSubBC()), MatchSubBC());
413 };
414
415 const auto MatchCD = [&]() {
416 // Cases:
417 // - d * c
418 // - c * d
419 // - c <- d implicitly equals 1
420 return m_CombineOr(m_c_Mul(m_Value(D), m_Specific(C)), m_Specific(C));
421 };
422
423 const auto Match = [&](const Value *LHS, const Value *RHS) {
424 // We do use m_Specific(C) in MatchCD, so we have to make sure that C is
425 // already bound; match(LHS, MatchASubBC()) absolutely has to evaluate
426 // first and return true.
427 //
428 // If Match returns true, it is guaranteed that B != nullptr, C != nullptr.
429 return match(LHS, MatchASubBC()) && match(RHS, MatchCD());
430 };
431
432 if (!Match(Op0, Op1) && !Match(Op1, Op0))
433 return;
434
435 const auto ComputeKnownBitsOrOne = [&](const Value *V) {
436 // For some of the values we use the convention of leaving
437 // it nullptr to signify an implicit constant 1.
438 return V ? computeKnownBits(V, DemandedElts, Q, Depth + 1)
439 : KnownBits::makeConstant(APInt(BitWidth, 1));
440 };
441
442 // Check that all operands are non-negative
443 const KnownBits KnownA = ComputeKnownBitsOrOne(A);
444 if (!KnownA.isNonNegative())
445 return;
446
447 const KnownBits KnownD = ComputeKnownBitsOrOne(D);
448 if (!KnownD.isNonNegative())
449 return;
450
451 const KnownBits KnownB = computeKnownBits(B, DemandedElts, Q, Depth + 1);
452 if (!KnownB.isNonNegative())
453 return;
454
455 const KnownBits KnownC = computeKnownBits(C, DemandedElts, Q, Depth + 1);
456 if (!KnownC.isNonNegative())
457 return;
458
459 // If we matched subtraction as xor, we need to actually check that xor
460 // is semantically equivalent to subtraction.
461 //
462 // For that to be true, b has to be a mask for c; in other words, b's known
463 // ones have to cover all known and possible ones of c.
464 if (SubBC->getOpcode() == Instruction::Xor &&
465 !KnownC.getMaxValue().isSubsetOf(KnownB.getMinValue()))
466 return;
467
468 const APInt MaxA = KnownA.getMaxValue();
469 const APInt MaxD = KnownD.getMaxValue();
470 const APInt MaxAD = APIntOps::umax(MaxA, MaxD);
471 const APInt MaxB = KnownB.getMaxValue();
472
473 // We can't infer leading zeros info if the upper-bound estimate wraps.
474 bool Overflow;
475 const APInt UpperBound = MaxAD.umul_ov(MaxB, Overflow);
476
477 if (Overflow)
478 return;
479
480 // If we know that x <= y and both are positive, then x has at least the
481 // same number of leading zeros as y.
482 const unsigned MinimumNumberOfLeadingZeros = UpperBound.countl_zero();
483 KnownOut.Zero.setHighBits(MinimumNumberOfLeadingZeros);
484}
485
486static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
487 bool NSW, bool NUW,
488 const APInt &DemandedElts,
489 KnownBits &KnownOut, KnownBits &Known2,
490 const SimplifyQuery &Q, unsigned Depth) {
491 computeKnownBits(Op1, DemandedElts, KnownOut, Q, Depth + 1);
492
493 // If one operand is unknown and we have no nowrap information,
494 // the result will be unknown independently of the second operand.
495 if (KnownOut.isUnknown() && !NSW && !NUW)
496 return;
497
498 computeKnownBits(Op0, DemandedElts, Known2, Q, Depth + 1);
499 KnownOut = KnownBits::computeForAddSub(Add, NSW, NUW, Known2, KnownOut);
500
501 if (!Add && NSW && !KnownOut.isNonNegative() &&
503 .value_or(false))
504 KnownOut.makeNonNegative();
505
506 if (Add)
507 // Try to match lerp pattern and combine results
508 computeKnownBitsFromLerpPattern(Op0, Op1, DemandedElts, KnownOut, Q, Depth);
509}
510
511static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
512 bool NUW, const APInt &DemandedElts,
513 KnownBits &Known, KnownBits &Known2,
514 const SimplifyQuery &Q, unsigned Depth) {
515 computeKnownBits(Op1, DemandedElts, Known, Q, Depth + 1);
516 computeKnownBits(Op0, DemandedElts, Known2, Q, Depth + 1);
517
518 bool isKnownNegative = false;
519 bool isKnownNonNegative = false;
520 // If the multiplication is known not to overflow, compute the sign bit.
521 if (NSW) {
522 if (Op0 == Op1) {
523 // The product of a number with itself is non-negative.
524 isKnownNonNegative = true;
525 } else {
526 bool isKnownNonNegativeOp1 = Known.isNonNegative();
527 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
528 bool isKnownNegativeOp1 = Known.isNegative();
529 bool isKnownNegativeOp0 = Known2.isNegative();
530 // The product of two numbers with the same sign is non-negative.
531 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
532 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
533 if (!isKnownNonNegative && NUW) {
534 // mul nuw nsw with a factor > 1 is non-negative.
535 KnownBits One = KnownBits::makeConstant(APInt(Known.getBitWidth(), 1));
536 isKnownNonNegative = KnownBits::sgt(Known, One).value_or(false) ||
537 KnownBits::sgt(Known2, One).value_or(false);
538 }
539
540 // The product of a negative number and a non-negative number is either
541 // negative or zero.
542 if (!isKnownNonNegative)
543 isKnownNegative =
544 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
545 Known2.isNonZero()) ||
546 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
547 }
548 }
549
550 bool SelfMultiply = Op0 == Op1;
551 if (SelfMultiply)
552 SelfMultiply &=
553 isGuaranteedNotToBeUndef(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
554 Known = KnownBits::mul(Known, Known2, SelfMultiply);
555
556 if (SelfMultiply) {
557 unsigned SignBits = ComputeNumSignBits(Op0, DemandedElts, Q, Depth + 1);
558 unsigned TyBits = Op0->getType()->getScalarSizeInBits();
559 unsigned OutValidBits = 2 * (TyBits - SignBits + 1);
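 // Illustrative numbers: an i32 operand with 28 sign bits lies in [-16, 15],
 // so its square is at most 256 and the high 23 bits of the product are zero.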
560
561 if (OutValidBits < TyBits) {
562 APInt KnownZeroMask =
563 APInt::getHighBitsSet(TyBits, TyBits - OutValidBits + 1);
564 Known.Zero |= KnownZeroMask;
565 }
566 }
567
568 // Only make use of no-wrap flags if we failed to compute the sign bit
569 // directly. This matters if the multiplication always overflows, in
570 // which case we prefer to follow the result of the direct computation,
571 // though as the program is invoking undefined behaviour we can choose
572 // whatever we like here.
573 if (isKnownNonNegative && !Known.isNegative())
574 Known.makeNonNegative();
575 else if (isKnownNegative && !Known.isNonNegative())
576 Known.makeNegative();
577}
578
579void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
580 KnownBits &Known) {
581 unsigned BitWidth = Known.getBitWidth();
582 unsigned NumRanges = Ranges.getNumOperands() / 2;
583 assert(NumRanges >= 1);
584
585 Known.setAllConflict();
586
587 for (unsigned i = 0; i < NumRanges; ++i) {
588 ConstantInt *Lower =
589 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
590 ConstantInt *Upper =
591 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
592 ConstantRange Range(Lower->getValue(), Upper->getValue());
593 // BitWidth must equal the Ranges BitWidth for the correct number of high
594 // bits to be set.
595 assert(BitWidth == Range.getBitWidth() &&
596 "Known bit width must match range bit width!");
597
598 // The first CommonPrefixBits of all values in Range are equal.
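 // For example, a single half-open range [4, 8) over i8 covers 0b100..0b111:
 // the five high bits become known zero, bit 2 becomes known one, and the two
 // low bits stay unknown.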
599 unsigned CommonPrefixBits =
600 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
601 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
602 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
603 Known.One &= UnsignedMax & Mask;
604 Known.Zero &= ~UnsignedMax & Mask;
605 }
606}
607
608static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
609 SmallVector<const Instruction *, 16> WorkSet(1, I);
610 SmallPtrSet<const Instruction *, 32> Visited;
611 SmallPtrSet<const Instruction *, 16> EphValues;
612
613 // The instruction defining an assumption's condition itself is always
614 // considered ephemeral to that assumption (even if it has other
615 // non-ephemeral users). See r246696's test case for an example.
616 if (is_contained(I->operands(), E))
617 return true;
618
619 while (!WorkSet.empty()) {
620 const Instruction *V = WorkSet.pop_back_val();
621 if (!Visited.insert(V).second)
622 continue;
623
624 // If all uses of this value are ephemeral, then so is this value.
625 if (all_of(V->users(), [&](const User *U) {
626 return EphValues.count(cast<Instruction>(U));
627 })) {
628 if (V == E)
629 return true;
630
631 if (V == I || (!V->mayHaveSideEffects() && !V->isTerminator())) {
632 EphValues.insert(V);
633
634 if (const User *U = dyn_cast<User>(V)) {
635 for (const Use &U : U->operands()) {
636 if (const auto *I = dyn_cast<Instruction>(U.get()))
637 WorkSet.push_back(I);
638 }
639 }
640 }
641 }
642 }
643
644 return false;
645}
646
647// Is this an intrinsic that cannot be speculated but also cannot trap?
648static bool isAssumeLikeIntrinsic(const Instruction *I) {
649 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
650 return CI->isAssumeLikeIntrinsic();
651
652 return false;
653}
654
655bool llvm::isValidAssumeForContext(const Instruction *Inv,
656 const Instruction *CxtI,
657 const DominatorTree *DT,
658 bool AllowEphemerals) {
659 // There are two restrictions on the use of an assume:
660 // 1. The assume must dominate the context (or the control flow must
661 // reach the assume whenever it reaches the context).
662 // 2. The context must not be in the assume's set of ephemeral values
663 // (otherwise we will use the assume to prove that the condition
664 // feeding the assume is trivially true, thus causing the removal of
665 // the assume).
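 // Illustrative IR (names invented for the example):
 //   %cond = icmp ugt i64 %n, 3
 //   call void @llvm.assume(i1 %cond)
 //   %q = udiv i64 %x, %n   ; the assume is a valid, dominating context here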
666
667 if (Inv->getParent() == CxtI->getParent()) {
668 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
669 // in the BB.
670 if (Inv->comesBefore(CxtI))
671 return true;
672
673 // Don't let an assume affect itself - this would cause the problems
674 // `isEphemeralValueOf` is trying to prevent, and it would also make
675 // the loop below go out of bounds.
676 if (!AllowEphemerals && Inv == CxtI)
677 return false;
678
679 // The context comes first, but they're both in the same block.
680 // Make sure there is nothing in between that might interrupt
681 // the control flow, not even CxtI itself.
682 // We limit the scan distance between the assume and its context instruction
683 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
684 // it can be adjusted if needed (could be turned into a cl::opt).
685 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
686 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
687 return false;
688
689 return AllowEphemerals || !isEphemeralValueOf(Inv, CxtI);
690 }
691
692 // Inv and CxtI are in different blocks.
693 if (DT) {
694 if (DT->dominates(Inv, CxtI))
695 return true;
696 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor() ||
697 Inv->getParent()->isEntryBlock()) {
698 // We don't have a DT, but this trivially dominates.
699 return true;
700 }
701
702 return false;
703}
704
705static bool willNotFreeBetween(const Instruction *Assume,
706 const Instruction *CtxI) {
707 // Helper to check if there are any calls in the range that may free memory.
708 auto hasNoFreeCalls = [](auto Range) {
709 for (const auto &[Idx, I] : enumerate(Range)) {
710 if (Idx > MaxInstrsToCheckForFree)
711 return false;
712 if (const auto *CB = dyn_cast<CallBase>(&I))
713 if (!CB->hasFnAttr(Attribute::NoFree))
714 return false;
715 }
716 return true;
717 };
718
719 // Make sure the current function cannot arrange for another thread to free on
720 // its behalf.
721 if (!CtxI->getFunction()->hasNoSync())
722 return false;
723
724 // Handle cross-block case: CtxI in a successor of Assume's block.
725 const BasicBlock *CtxBB = CtxI->getParent();
726 const BasicBlock *AssumeBB = Assume->getParent();
727 BasicBlock::const_iterator CtxIter = CtxI->getIterator();
728 if (CtxBB != AssumeBB) {
729 if (CtxBB->getSinglePredecessor() != AssumeBB)
730 return false;
731
732 if (!hasNoFreeCalls(make_range(CtxBB->begin(), CtxIter)))
733 return false;
734
735 CtxIter = AssumeBB->end();
736 } else {
737 // Same block case: check that Assume comes before CtxI.
738 if (!Assume->comesBefore(CtxI))
739 return false;
740 }
741
742 // Check if there are any calls between Assume and CtxIter that may free
743 // memory.
744 return hasNoFreeCalls(make_range(Assume->getIterator(), CtxIter));
745}
746
747// TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
748// we still have enough information about `RHS` to conclude non-zero. For
749// example Pred=EQ, RHS=isKnownNonZero. cmpExcludesZero is called in loops
750// so the extra compile time may not be worth it, but possibly a second API
751// should be created for use outside of loops.
752static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
753 // v u> y implies v != 0.
754 if (Pred == ICmpInst::ICMP_UGT)
755 return true;
756
757 // Special-case v != 0 to also handle v != null.
758 if (Pred == ICmpInst::ICMP_NE)
759 return match(RHS, m_Zero());
760
761 // All other predicates - rely on generic ConstantRange handling.
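 // For example, for an assumption `icmp slt i32 %v, -5`, makeExactICmpRegion
 // yields [INT_MIN, -5), which does not contain zero, so %v must be non-zero.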
762 const APInt *C;
763 auto Zero = APInt::getZero(RHS->getType()->getScalarSizeInBits());
764 if (match(RHS, m_APInt(C))) {
765 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
766 return !TrueValues.contains(Zero);
767 }
768
769 auto *VC = dyn_cast<ConstantDataVector>(RHS);
770 if (VC == nullptr)
771 return false;
772
773 for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
774 ++ElemIdx) {
775 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
776 Pred, VC->getElementAsAPInt(ElemIdx));
777 if (TrueValues.contains(Zero))
778 return false;
779 }
780 return true;
781}
782
783static void breakSelfRecursivePHI(const Use *U, const PHINode *PHI,
784 Value *&ValOut, Instruction *&CtxIOut,
785 const PHINode **PhiOut = nullptr) {
786 ValOut = U->get();
787 if (ValOut == PHI)
788 return;
789 CtxIOut = PHI->getIncomingBlock(*U)->getTerminator();
790 if (PhiOut)
791 *PhiOut = PHI;
792 Value *V;
793 // If the Use is a select of this phi, compute analysis on other arm to break
794 // recursion.
795 // TODO: Min/Max
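 // Illustrative shape: for %phi = phi [ %start, %entry ], [ %sel, %loop ] with
 // %sel = select i1 %c, i32 %phi, i32 %x, the analysis continues on %x at the
 // terminator of the corresponding incoming block.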
796 if (match(ValOut, m_Select(m_Value(), m_Specific(PHI), m_Value(V))) ||
797 match(ValOut, m_Select(m_Value(), m_Value(V), m_Specific(PHI))))
798 ValOut = V;
799
800 // Similarly, if the incoming value is itself a 2-operand phi, compute the
801 // analysis on its other incoming value to break recursion.
802 // TODO: We could handle any number of incoming edges as long as we only have
803 // two unique values.
804 if (auto *IncPhi = dyn_cast<PHINode>(ValOut);
805 IncPhi && IncPhi->getNumIncomingValues() == 2) {
806 for (int Idx = 0; Idx < 2; ++Idx) {
807 if (IncPhi->getIncomingValue(Idx) == PHI) {
808 ValOut = IncPhi->getIncomingValue(1 - Idx);
809 if (PhiOut)
810 *PhiOut = IncPhi;
811 CtxIOut = IncPhi->getIncomingBlock(1 - Idx)->getTerminator();
812 break;
813 }
814 }
815 }
816}
817
818static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) {
819 // Use of assumptions is context-sensitive. If we don't have a context, we
820 // cannot use them!
821 if (!Q.AC || !Q.CxtI)
822 return false;
823
824 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) {
825 if (!Elem.Assume)
826 continue;
827
828 AssumeInst *I = cast<AssumeInst>(Elem.Assume);
829 assert(I->getFunction() == Q.CxtI->getFunction() &&
830 "Got assumption for the wrong function!");
831
832 if (Elem.Index != AssumptionCache::ExprResultIdx) {
833 if (!V->getType()->isPointerTy())
834 continue;
835 if (RetainedKnowledge RK = getKnowledgeFromBundle(
836 *I, I->bundle_op_info_begin()[Elem.Index])) {
837 if (RK.WasOn == V &&
838 (RK.AttrKind == Attribute::NonNull ||
839 (RK.AttrKind == Attribute::Dereferenceable &&
840 !NullPointerIsDefined(Q.CxtI->getFunction(),
841 V->getType()->getPointerAddressSpace()))) &&
842 isValidAssumeForContext(I, Q.CxtI, Q.DT))
843 return true;
844 }
845 continue;
846 }
847
848 // Warning: This loop can end up being somewhat performance sensitive.
849 // We're running this loop once for each value queried, resulting in a
850 // runtime of ~O(#assumes * #values).
851
852 Value *RHS;
853 CmpPredicate Pred;
854 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
855 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
856 continue;
857
858 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
859 return true;
860 }
861
862 return false;
863}
864
865static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred,
866 Value *LHS, Value *RHS, KnownBits &Known,
867 const SimplifyQuery &Q) {
868 if (RHS->getType()->isPointerTy()) {
869 // Handle comparison of pointer to null explicitly, as it will not be
870 // covered by the m_APInt() logic below.
871 if (LHS == V && match(RHS, m_Zero())) {
872 switch (Pred) {
873 case ICmpInst::ICMP_EQ:
874 Known.setAllZero();
875 break;
876 case ICmpInst::ICMP_SGE:
877 case ICmpInst::ICMP_SGT:
878 Known.makeNonNegative();
879 break;
880 case ICmpInst::ICMP_SLT:
881 Known.makeNegative();
882 break;
883 default:
884 break;
885 }
886 }
887 return;
888 }
889
890 unsigned BitWidth = Known.getBitWidth();
891 auto m_V =
892 m_CombineOr(m_Specific(V), m_PtrToIntSameSize(Q.DL, m_Specific(V)));
893
894 Value *Y;
895 const APInt *Mask, *C;
896 if (!match(RHS, m_APInt(C)))
897 return;
898
899 uint64_t ShAmt;
900 switch (Pred) {
901 case ICmpInst::ICMP_EQ:
902 // assume(V = C)
903 if (match(LHS, m_V)) {
904 Known = Known.unionWith(KnownBits::makeConstant(*C));
905 // assume(V & Mask = C)
906 } else if (match(LHS, m_c_And(m_V, m_Value(Y)))) {
907 // For one bits in Mask, we can propagate bits from C to V.
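 // For example, assume((V & 0x0F) == 0x05) makes the low nibble of V known to
 // be 0b0101; the bits outside the mask remain unknown.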
908 Known.One |= *C;
909 if (match(Y, m_APInt(Mask)))
910 Known.Zero |= ~*C & *Mask;
911 // assume(V | Mask = C)
912 } else if (match(LHS, m_c_Or(m_V, m_Value(Y)))) {
913 // For zero bits in Mask, we can propagate bits from C to V.
914 Known.Zero |= ~*C;
915 if (match(Y, m_APInt(Mask)))
916 Known.One |= *C & ~*Mask;
917 // assume(V << ShAmt = C)
918 } else if (match(LHS, m_Shl(m_V, m_ConstantInt(ShAmt))) &&
919 ShAmt < BitWidth) {
920 // For those bits in C that are known, we can propagate them to known
921 // bits in V shifted to the right by ShAmt.
922 KnownBits RHSKnown = KnownBits::makeConstant(*C);
923 RHSKnown >>= ShAmt;
924 Known = Known.unionWith(RHSKnown);
925 // assume(V >> ShAmt = C)
926 } else if (match(LHS, m_Shr(m_V, m_ConstantInt(ShAmt))) &&
927 ShAmt < BitWidth) {
928 // For those bits in C that are known, we can propagate them to known
929 // bits in V shifted to the left by ShAmt.
930 KnownBits RHSKnown = KnownBits::makeConstant(*C);
931 RHSKnown <<= ShAmt;
932 Known = Known.unionWith(RHSKnown);
933 }
934 break;
935 case ICmpInst::ICMP_NE: {
936 // assume (V & B != 0) where B is a power of 2
937 const APInt *BPow2;
938 if (C->isZero() && match(LHS, m_And(m_V, m_Power2(BPow2))))
939 Known.One |= *BPow2;
940 break;
941 }
942 default: {
943 const APInt *Offset = nullptr;
944 if (match(LHS, m_CombineOr(m_V, m_AddLike(m_V, m_APInt(Offset))))) {
945 ConstantRange LHSRange = ConstantRange::makeAllowedICmpRegion(Pred, *C);
946 if (Offset)
947 LHSRange = LHSRange.sub(*Offset);
948 Known = Known.unionWith(LHSRange.toKnownBits());
949 }
950 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
951 // X & Y u> C -> X u> C && Y u> C
952 // X nuw- Y u> C -> X u> C
953 if (match(LHS, m_c_And(m_V, m_Value())) ||
954 match(LHS, m_NUWSub(m_V, m_Value())))
955 Known.One.setHighBits(
956 (*C + (Pred == ICmpInst::ICMP_UGT)).countLeadingOnes());
957 }
958 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
959 // X | Y u< C -> X u< C && Y u< C
960 // X nuw+ Y u< C -> X u< C && Y u< C
961 if (match(LHS, m_c_Or(m_V, m_Value())) ||
962 match(LHS, m_c_NUWAdd(m_V, m_Value()))) {
963 Known.Zero.setHighBits(
964 (*C - (Pred == ICmpInst::ICMP_ULT)).countLeadingZeros());
965 }
966 }
967 } break;
968 }
969}
970
971static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp,
972 KnownBits &Known,
973 const SimplifyQuery &SQ, bool Invert) {
974 CmpInst::Predicate Pred =
975 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
976 Value *LHS = Cmp->getOperand(0);
977 Value *RHS = Cmp->getOperand(1);
978
979 // Handle icmp pred (trunc V), C
980 if (match(LHS, m_Trunc(m_Specific(V)))) {
981 KnownBits DstKnown(LHS->getType()->getScalarSizeInBits());
982 computeKnownBitsFromCmp(LHS, Pred, LHS, RHS, DstKnown, SQ);
983 if (match(LHS, m_NUWTrunc(m_Specific(V))))
984 Known = Known.unionWith(DstKnown.zext(Known.getBitWidth()));
985 else
986 Known = Known.unionWith(DstKnown.anyext(Known.getBitWidth()));
987 return;
988 }
989
990 computeKnownBitsFromCmp(V, Pred, LHS, RHS, Known, SQ);
991}
992
993static void computeKnownBitsFromCond(const Value *V, Value *Cond,
994 KnownBits &Known, const SimplifyQuery &SQ,
995 bool Invert, unsigned Depth) {
996 Value *A, *B;
997 if (Depth < MaxAnalysisRecursionDepth &&
998 match(Cond, m_LogicalOp(m_Value(A), m_Value(B)))) {
999 KnownBits Known2(Known.getBitWidth());
1000 KnownBits Known3(Known.getBitWidth());
1001 computeKnownBitsFromCond(V, A, Known2, SQ, Invert, Depth + 1);
1002 computeKnownBitsFromCond(V, B, Known3, SQ, Invert, Depth + 1);
1003 if (Invert ? match(Cond, m_LogicalOr(m_Value(), m_Value()))
1004 : match(Cond, m_LogicalAnd(m_Value(), m_Value())))
1005 Known2 = Known2.unionWith(Known3);
1006 else
1007 Known2 = Known2.intersectWith(Known3);
1008 Known = Known.unionWith(Known2);
1009 return;
1010 }
1011
1012 if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
1013 computeKnownBitsFromICmpCond(V, Cmp, Known, SQ, Invert);
1014 return;
1015 }
1016
1017 if (match(Cond, m_Trunc(m_Specific(V)))) {
1018 KnownBits DstKnown(1);
1019 if (Invert) {
1020 DstKnown.setAllZero();
1021 } else {
1022 DstKnown.setAllOnes();
1023 }
1024 if (cast<TruncInst>(Cond)->hasNoUnsignedWrap()) {
1025 Known = Known.unionWith(DstKnown.zext(Known.getBitWidth()));
1026 return;
1027 }
1028 Known = Known.unionWith(DstKnown.anyext(Known.getBitWidth()));
1029 return;
1030 }
1031
1032 if (match(Cond, m_Not(m_Value(A))))
1033 computeKnownBitsFromCond(V, A, Known, SQ, !Invert, Depth + 1);
1034}
1035
1036void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
1037 const SimplifyQuery &Q, unsigned Depth) {
1038 // Handle injected condition.
1039 if (Q.CC && Q.CC->AffectedValues.contains(V))
1040 computeKnownBitsFromCond(V, Q.CC->Cond, Known, Q, Q.CC->Invert, Depth);
1041
1042 if (!Q.CxtI)
1043 return;
1044
1045 if (Q.DC && Q.DT) {
1046 // Handle dominating conditions.
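 // For example, inside the taken successor of `br i1 (icmp ult i32 %x, 16)`,
 // every bit of %x above bit 3 is known to be zero.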
1047 for (BranchInst *BI : Q.DC->conditionsFor(V)) {
1048 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
1049 if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
1050 computeKnownBitsFromCond(V, BI->getCondition(), Known, Q,
1051 /*Invert*/ false, Depth);
1052
1053 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
1054 if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
1055 computeKnownBitsFromCond(V, BI->getCondition(), Known, Q,
1056 /*Invert*/ true, Depth);
1057 }
1058
1059 if (Known.hasConflict())
1060 Known.resetAll();
1061 }
1062
1063 if (!Q.AC)
1064 return;
1065
1066 unsigned BitWidth = Known.getBitWidth();
1067
1068 // Note that the patterns below need to be kept in sync with the code
1069 // in AssumptionCache::updateAffectedValues.
1070
1071 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) {
1072 if (!Elem.Assume)
1073 continue;
1074
1075 AssumeInst *I = cast<AssumeInst>(Elem.Assume);
1076 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
1077 "Got assumption for the wrong function!");
1078
1079 if (Elem.Index != AssumptionCache::ExprResultIdx) {
1080 if (!V->getType()->isPointerTy())
1081 continue;
1082 if (RetainedKnowledge RK = getKnowledgeFromBundle(
1083 *I, I->bundle_op_info_begin()[Elem.Index])) {
1084 // Allow AllowEphemerals in isValidAssumeForContext, as the CxtI might
1085 // be the producer of the pointer in the bundle. At the moment, align
1086 // assumptions aren't optimized away.
1087 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
1088 isPowerOf2_64(RK.ArgValue) &&
1089 isValidAssumeForContext(I, Q.CxtI, Q.DT, /*AllowEphemerals*/ true))
1090 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
1091 }
1092 continue;
1093 }
1094
1095 // Warning: This loop can end up being somewhat performance sensitive.
1096 // We're running this loop once for each value queried, resulting in a
1097 // runtime of ~O(#assumes * #values).
1098
1099 Value *Arg = I->getArgOperand(0);
1100
1101 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
1102 assert(BitWidth == 1 && "assume operand is not i1?");
1103 (void)BitWidth;
1104 Known.setAllOnes();
1105 return;
1106 }
1107 if (match(Arg, m_Not(m_Specific(V))) &&
1108 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
1109 assert(BitWidth == 1 && "assume operand is not i1?");
1110 (void)BitWidth;
1111 Known.setAllZero();
1112 return;
1113 }
1114 auto *Trunc = dyn_cast<TruncInst>(Arg);
1115 if (Trunc && Trunc->getOperand(0) == V &&
1116 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
1117 if (Trunc->hasNoUnsignedWrap()) {
1118 Known = KnownBits::makeConstant(APInt(BitWidth, 1));
1119 return;
1120 }
1121 Known.One.setBit(0);
1122 return;
1123 }
1124
1125 // The remaining tests are all recursive, so bail out if we hit the limit.
1126 if (Depth == MaxAnalysisRecursionDepth)
1127 continue;
1128
1129 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
1130 if (!Cmp)
1131 continue;
1132
1133 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
1134 continue;
1135
1136 computeKnownBitsFromICmpCond(V, Cmp, Known, Q, /*Invert=*/false);
1137 }
1138
1139 // Conflicting assumption: Undefined behavior will occur on this execution
1140 // path.
1141 if (Known.hasConflict())
1142 Known.resetAll();
1143}
1144
1145/// Compute known bits from a shift operator, including those with a
1146/// non-constant shift amount. Known is the output of this function. Known2 is a
1147/// pre-allocated temporary with the same bit width as Known and on return
1148/// contains the known bits of the value being shifted. KF is an
1149/// operator-specific function that, given the known-bits and a shift amount,
1150/// computes the implied known-bits of the shift operator's result
1151/// for that shift amount. The results from calling KF are conservatively
1152/// combined for all permitted shift amounts.
1153static void computeKnownBitsFromShiftOperator(
1154 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
1155 KnownBits &Known2, const SimplifyQuery &Q, unsigned Depth,
1156 function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {
1157 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
1158 computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
1159 // To limit compile-time impact, only query isKnownNonZero() if we know at
1160 // least something about the shift amount.
1161 bool ShAmtNonZero =
1162 Known.isNonZero() ||
1163 (Known.getMaxValue().ult(Known.getBitWidth()) &&
1164 isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth + 1));
1165 Known = KF(Known2, Known, ShAmtNonZero);
1166}
1167
1168static KnownBits
1169getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
1170 const KnownBits &KnownLHS, const KnownBits &KnownRHS,
1171 const SimplifyQuery &Q, unsigned Depth) {
1172 unsigned BitWidth = KnownLHS.getBitWidth();
1173 KnownBits KnownOut(BitWidth);
1174 bool IsAnd = false;
1175 bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
1176 Value *X = nullptr, *Y = nullptr;
1177
1178 switch (I->getOpcode()) {
1179 case Instruction::And:
1180 KnownOut = KnownLHS & KnownRHS;
1181 IsAnd = true;
1182 // and(x, -x) is a common idiom that clears all but the lowest set
1183 // bit. If we have a single known bit in x, we can clear all bits
1184 // above it.
1185 // TODO: instcombine often reassociates independent `and` which can hide
1186 // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
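 // For example, if x is known to have bit 3 set and bits 0-2 clear, then
 // x & -x is exactly 8, regardless of the unknown upper bits.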
1187 if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
1188 // -(-x) == x so using whichever (LHS/RHS) gets us a better result.
1189 if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
1190 KnownOut = KnownLHS.blsi();
1191 else
1192 KnownOut = KnownRHS.blsi();
1193 }
1194 break;
1195 case Instruction::Or:
1196 KnownOut = KnownLHS | KnownRHS;
1197 break;
1198 case Instruction::Xor:
1199 KnownOut = KnownLHS ^ KnownRHS;
1200 // xor(x, x-1) is a common idiom that clears all bits above the lowest set
1201 // bit. If we have a single known bit in x, we can clear all bits
1202 // above it.
1203 // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C !=
1204 // -1 but for the purpose of demanded bits (xor(x, x-C) &
1205 // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern
1206 // to use arbitrary C if xor(x, x-C) is the same as xor(x, x-1).
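 // For example, if the lowest set bit of x is known to be bit 3, then
 // xor(x, x-1) is exactly 0b1111.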
1207 if (HasKnownOne &&
1208 match(I, m_c_Xor(m_Value(X), m_Add(m_Deferred(X), m_AllOnes())))) {
1209 const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
1210 KnownOut = XBits.blsmsk();
1211 }
1212 break;
1213 default:
1214 llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'");
1215 }
1216
1217 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1218 // xor/or(x, add (x, -1)) is an idiom that will always set the low bit.
1219 // Here we handle the more general case of adding any odd number by
1220 // matching the form and/xor/or(x, add(x, y)) where y is odd.
1221 // TODO: This could be generalized to clearing any bit set in y where the
1222 // following bit is known to be unset in y.
1223 if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
1227 KnownBits KnownY(BitWidth);
1228 computeKnownBits(Y, DemandedElts, KnownY, Q, Depth + 1);
1229 if (KnownY.countMinTrailingOnes() > 0) {
1230 if (IsAnd)
1231 KnownOut.Zero.setBit(0);
1232 else
1233 KnownOut.One.setBit(0);
1234 }
1235 }
1236 return KnownOut;
1237}
1238
1239static KnownBits computeKnownBitsForHorizontalOperation(
1240 const Operator *I, const APInt &DemandedElts, const SimplifyQuery &Q,
1241 unsigned Depth,
1242 const function_ref<KnownBits(const KnownBits &, const KnownBits &)>
1243 KnownBitsFunc) {
1244 APInt DemandedEltsLHS, DemandedEltsRHS;
1245 getHorizDemandedEltsForFirstOperand(Q.DL.getTypeSizeInBits(I->getType()),
1246 DemandedElts, DemandedEltsLHS,
1247 DemandedEltsRHS);
1248
1249 const auto ComputeForSingleOpFunc =
1250 [Depth, &Q, KnownBitsFunc](const Value *Op, APInt &DemandedEltsOp) {
1251 return KnownBitsFunc(
1252 computeKnownBits(Op, DemandedEltsOp, Q, Depth + 1),
1253 computeKnownBits(Op, DemandedEltsOp << 1, Q, Depth + 1));
1254 };
1255
1256 if (DemandedEltsRHS.isZero())
1257 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS);
1258 if (DemandedEltsLHS.isZero())
1259 return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS);
1260
1261 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS)
1262 .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS));
1263}
1264
1265// Public so this can be used in `SimplifyDemandedUseBits`.
1266KnownBits llvm::analyzeKnownBitsFromAndXorOr(const Operator *I,
1267 const KnownBits &KnownLHS,
1268 const KnownBits &KnownRHS,
1269 const SimplifyQuery &SQ,
1270 unsigned Depth) {
1271 auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
1272 APInt DemandedElts =
1273 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
1274
1275 return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, SQ,
1276 Depth);
1277}
1278
1279ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
1280 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
1281 // Without vscale_range, we only know that vscale is non-zero.
1282 if (!Attr.isValid())
1283 return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));
1284
1285 unsigned AttrMin = Attr.getVScaleRangeMin();
1286 // Minimum is larger than vscale width, result is always poison.
1287 if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
1288 return ConstantRange::getEmpty(BitWidth);
1289
1290 APInt Min(BitWidth, AttrMin);
1291 std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
1292 if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
1293 return ConstantRange(Min, APInt::getZero(BitWidth));
1294
1295 return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
1296}
1297
1298static void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
1299 Value *Arm, bool Invert,
1300 const SimplifyQuery &Q, unsigned Depth) {
1301 // If we have a constant arm, we are done.
1302 if (Known.isConstant())
1303 return;
1304
1305 // See what condition implies about the bits of the select arm.
1306 KnownBits CondRes(Known.getBitWidth());
1307 computeKnownBitsFromCond(Arm, Cond, CondRes, Q, Invert, Depth + 1);
1308 // If we don't get any information from the condition, no reason to
1309 // proceed.
1310 if (CondRes.isUnknown())
1311 return;
1312
1313 // We can have a conflict if the condition is dead. I.e. if we have
1314 // (x | 64) < 32 ? (x | 64) : y
1315 // we will have conflict at bit 6 from the condition/the `or`.
1316 // In that case just return. It's not particularly important
1317 // what we do, as this select is going to be simplified soon.
1318 CondRes = CondRes.unionWith(Known);
1319 if (CondRes.hasConflict())
1320 return;
1321
1322 // Finally make sure the information we found is valid. This is relatively
1323 // expensive so it's left for the very end.
1324 if (!isGuaranteedNotToBeUndef(Arm, Q.AC, Q.CxtI, Q.DT, Depth + 1))
1325 return;
1326
1327 // Finally, we know we get information from the condition and it's valid,
1328 // so return it.
1329 Known = CondRes;
1330}
1331
1332// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
1333// Returns the input and lower/upper bounds.
1334static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
1335 const APInt *&CLow, const APInt *&CHigh) {
1336 assert(isa<Operator>(Select) &&
1337 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
1338 "Input should be a Select!");
1339
1340 const Value *LHS = nullptr, *RHS = nullptr;
1341 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
1342 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
1343 return false;
1344
1345 if (!match(RHS, m_APInt(CLow)))
1346 return false;
1347
1348 const Value *LHS2 = nullptr, *RHS2 = nullptr;
1349 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
1350 if (getInverseMinMaxFlavor(SPF) != SPF2)
1351 return false;
1352
1353 if (!match(RHS2, m_APInt(CHigh)))
1354 return false;
1355
1356 if (SPF == SPF_SMIN)
1357 std::swap(CLow, CHigh);
1358
1359 In = LHS2;
1360 return CLow->sle(*CHigh);
1361}
1362
1363static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
1364 const APInt *&CLow,
1365 const APInt *&CHigh) {
1366 assert((II->getIntrinsicID() == Intrinsic::smin ||
1367 II->getIntrinsicID() == Intrinsic::smax) &&
1368 "Must be smin/smax");
1369
1370 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
1371 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
1372 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
1373 !match(II->getArgOperand(1), m_APInt(CLow)) ||
1374 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
1375 return false;
1376
1377 if (II->getIntrinsicID() == Intrinsic::smin)
1378 std::swap(CLow, CHigh);
1379 return CLow->sle(*CHigh);
1380}
1381
1382static void unionWithMinMaxIntrinsicClamp(const IntrinsicInst *II,
1383 KnownBits &Known) {
1384 const APInt *CLow, *CHigh;
1385 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
1386 Known = Known.unionWith(
1387 ConstantRange::getNonEmpty(*CLow, *CHigh + 1).toKnownBits());
1388}
1389
1390static void computeKnownBitsFromOperator(const Operator *I,
1391 const APInt &DemandedElts,
1392 KnownBits &Known,
1393 const SimplifyQuery &Q,
1394 unsigned Depth) {
1395 unsigned BitWidth = Known.getBitWidth();
1396
1397 KnownBits Known2(BitWidth);
1398 switch (I->getOpcode()) {
1399 default: break;
1400 case Instruction::Load:
1401 if (MDNode *MD =
1402 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1403 computeKnownBitsFromRangeMetadata(*MD, Known);
1404 break;
1405 case Instruction::And:
1406 computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
1407 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
1408
1409 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth);
1410 break;
1411 case Instruction::Or:
1412 computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
1413 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
1414
1415 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth);
1416 break;
1417 case Instruction::Xor:
1418 computeKnownBits(I->getOperand(1), DemandedElts, Known, Q, Depth + 1);
1419 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
1420
1421 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Q, Depth);
1422 break;
1423 case Instruction::Mul: {
1424 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1425 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1426 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, NUW,
1427 DemandedElts, Known, Known2, Q, Depth);
1428 break;
1429 }
1430 case Instruction::UDiv: {
1431 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
1432 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
1433 Known =
1434 KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
1435 break;
1436 }
1437 case Instruction::SDiv: {
1438 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
1439 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
1440 Known =
1441 KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
1442 break;
1443 }
1444 case Instruction::Select: {
1445 auto ComputeForArm = [&](Value *Arm, bool Invert) {
1446 KnownBits Res(Known.getBitWidth());
1447 computeKnownBits(Arm, DemandedElts, Res, Q, Depth + 1);
1448 adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Q, Depth);
1449 return Res;
1450 };
1451 // Only known if known in both the LHS and RHS.
1452 Known =
1453 ComputeForArm(I->getOperand(1), /*Invert=*/false)
1454 .intersectWith(ComputeForArm(I->getOperand(2), /*Invert=*/true));
1455 break;
1456 }
1457 case Instruction::FPTrunc:
1458 case Instruction::FPExt:
1459 case Instruction::FPToUI:
1460 case Instruction::FPToSI:
1461 case Instruction::SIToFP:
1462 case Instruction::UIToFP:
1463 break; // Can't work with floating point.
1464 case Instruction::PtrToInt:
1465 case Instruction::PtrToAddr:
1466 case Instruction::IntToPtr:
1467 // Fall through and handle them the same as zext/trunc.
1468 [[fallthrough]];
1469 case Instruction::ZExt:
1470 case Instruction::Trunc: {
1471 Type *SrcTy = I->getOperand(0)->getType();
1472
1473 unsigned SrcBitWidth;
1474 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1475 // which fall through here.
1476 Type *ScalarTy = SrcTy->getScalarType();
1477 SrcBitWidth = ScalarTy->isPointerTy() ?
1478 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1479 Q.DL.getTypeSizeInBits(ScalarTy);
1480
1481 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1482 Known = Known.anyextOrTrunc(SrcBitWidth);
1483 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
1484 if (auto *Inst = dyn_cast<PossiblyNonNegInst>(I);
1485 Inst && Inst->hasNonNeg() && !Known.isNegative())
1486 Known.makeNonNegative();
1487 Known = Known.zextOrTrunc(BitWidth);
1488 break;
1489 }
1490 case Instruction::BitCast: {
1491 Type *SrcTy = I->getOperand(0)->getType();
1492 if (SrcTy->isIntOrPtrTy() &&
1493 // TODO: For now, not handling conversions like:
1494 // (bitcast i64 %x to <2 x i32>)
1495 !I->getType()->isVectorTy()) {
1496 computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
1497 break;
1498 }
1499
1500 const Value *V;
1501 // Handle bitcast from floating point to integer.
1502 if (match(I, m_ElementWiseBitCast(m_Value(V))) &&
1503 V->getType()->isFPOrFPVectorTy()) {
1504 Type *FPType = V->getType()->getScalarType();
1505 KnownFPClass Result =
1506 computeKnownFPClass(V, DemandedElts, fcAllFlags, Q, Depth + 1);
1507 FPClassTest FPClasses = Result.KnownFPClasses;
1508
1509 // TODO: Treat it as zero/poison if the use of I is unreachable.
1510 if (FPClasses == fcNone)
1511 break;
1512
1513 if (Result.isKnownNever(fcNormal | fcSubnormal | fcNan)) {
1514 Known.setAllConflict();
1515
1516 if (FPClasses & fcInf)
1517 Known = Known.intersectWith(KnownBits::makeConstant(
1518 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt()));
1519
1520 if (FPClasses & fcZero)
1521 Known = Known.intersectWith(KnownBits::makeConstant(
1522 APInt::getZero(FPType->getScalarSizeInBits())));
1523
1524 Known.Zero.clearSignBit();
1525 Known.One.clearSignBit();
1526 }
1527
1528 if (Result.SignBit) {
1529 if (*Result.SignBit)
1530 Known.makeNegative();
1531 else
1532 Known.makeNonNegative();
1533 }
1534
1535 break;
1536 }
1537
1538 // Handle cast from vector integer type to scalar or vector integer.
1539 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1540 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1541 !I->getType()->isIntOrIntVectorTy() ||
1542 isa<ScalableVectorType>(I->getType()))
1543 break;
1544
1545 unsigned NumElts = DemandedElts.getBitWidth();
1546 bool IsLE = Q.DL.isLittleEndian();
1547 // Look through a cast from narrow vector elements to wider type.
1548 // Examples: v4i32 -> v2i64, v3i8 -> v24
1549 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1550 if (BitWidth % SubBitWidth == 0) {
1551 // Known bits are automatically intersected across demanded elements of a
1552 // vector. So for example, if a bit is computed as known zero, it must be
1553 // zero across all demanded elements of the vector.
1554 //
1555 // For this bitcast, each demanded element of the output is sub-divided
1556 // across a set of smaller vector elements in the source vector. To get
1557 // the known bits for an entire element of the output, compute the known
1558 // bits for each sub-element sequentially. This is done by shifting the
1559 // one-set-bit demanded elements parameter across the sub-elements for
1560 // consecutive calls to computeKnownBits. We are using the demanded
1561 // elements parameter as a mask operator.
1562 //
1563 // The known bits of each sub-element are then inserted into place
1564 // (dependent on endian) to form the full result of known bits.
1565 unsigned SubScale = BitWidth / SubBitWidth;
1566 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1567 for (unsigned i = 0; i != NumElts; ++i) {
1568 if (DemandedElts[i])
1569 SubDemandedElts.setBit(i * SubScale);
1570 }
1571
1572 KnownBits KnownSrc(SubBitWidth);
1573 for (unsigned i = 0; i != SubScale; ++i) {
1574 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc, Q,
1575 Depth + 1);
1576 unsigned ShiftElt = IsLE ? i : SubScale - 1 - i;
1577 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1578 }
1579 }
1580 // Look through a cast from wider vector elements to narrow type.
1581 // Examples: v2i64 -> v4i32
1582 if (SubBitWidth % BitWidth == 0) {
1583 unsigned SubScale = SubBitWidth / BitWidth;
1584 KnownBits KnownSrc(SubBitWidth);
1585 APInt SubDemandedElts =
1586 APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
1587 computeKnownBits(I->getOperand(0), SubDemandedElts, KnownSrc, Q,
1588 Depth + 1);
1589
1590 Known.setAllConflict();
1591 for (unsigned i = 0; i != NumElts; ++i) {
1592 if (DemandedElts[i]) {
1593 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
1594 unsigned Offset = (Shifts % SubScale) * BitWidth;
1595 Known = Known.intersectWith(KnownSrc.extractBits(BitWidth, Offset));
1596 if (Known.isUnknown())
1597 break;
1598 }
1599 }
1600 }
1601 break;
1602 }
1603 case Instruction::SExt: {
1604 // Compute the bits in the result that are not present in the input.
1605 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1606
1607 Known = Known.trunc(SrcBitWidth);
1608 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
1609 // If the sign bit of the input is known set or clear, then we know the
1610 // top bits of the result.
1611 Known = Known.sext(BitWidth);
1612 break;
1613 }
1614 case Instruction::Shl: {
1615 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1616 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1617 auto KF = [NUW, NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt,
1618 bool ShAmtNonZero) {
1619 return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
1620 };
1621 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Q, Depth,
1622 KF);
1623 // Trailing zeros of a left-shifted constant never decrease.
1624 const APInt *C;
1625 if (match(I->getOperand(0), m_APInt(C)))
1626 Known.Zero.setLowBits(C->countr_zero());
1627 break;
1628 }
1629 case Instruction::LShr: {
1630 bool Exact = Q.IIQ.isExact(cast<BinaryOperator>(I));
1631 auto KF = [Exact](const KnownBits &KnownVal, const KnownBits &KnownAmt,
1632 bool ShAmtNonZero) {
1633 return KnownBits::lshr(KnownVal, KnownAmt, ShAmtNonZero, Exact);
1634 };
1635 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Q, Depth,
1636 KF);
1637 // Leading zeros of a right-shifted constant never decrease.
1638 const APInt *C;
1639 if (match(I->getOperand(0), m_APInt(C)))
1640 Known.Zero.setHighBits(C->countl_zero());
1641 break;
1642 }
1643 case Instruction::AShr: {
1644 bool Exact = Q.IIQ.isExact(cast<BinaryOperator>(I));
1645 auto KF = [Exact](const KnownBits &KnownVal, const KnownBits &KnownAmt,
1646 bool ShAmtNonZero) {
1647 return KnownBits::ashr(KnownVal, KnownAmt, ShAmtNonZero, Exact);
1648 };
1649 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Q, Depth,
1650 KF);
1651 break;
1652 }
1653 case Instruction::Sub: {
1654 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1655 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1656 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, NUW,
1657 DemandedElts, Known, Known2, Q, Depth);
1658 break;
1659 }
1660 case Instruction::Add: {
1661 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1662 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1663 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, NUW,
1664 DemandedElts, Known, Known2, Q, Depth);
1665 break;
1666 }
1667 case Instruction::SRem:
1668 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
1669 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
1670 Known = KnownBits::srem(Known, Known2);
1671 break;
1672
1673 case Instruction::URem:
1674 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
1675 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
1676 Known = KnownBits::urem(Known, Known2);
1677 break;
1678 case Instruction::Alloca:
1679 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1680 break;
1681 case Instruction::GetElementPtr: {
1682 // Analyze all of the subscripts of this getelementptr instruction
1683 // to determine if we can prove known low zero bits.
1684 computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
1685 // Accumulate the constant indices in a separate variable
1686 // to minimize the number of calls to computeForAddSub.
1687 unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
1688 APInt AccConstIndices(IndexWidth, 0);
1689
1690 auto AddIndexToKnown = [&](KnownBits IndexBits) {
1691 if (IndexWidth == BitWidth) {
1692 // Note that inbounds does *not* guarantee nsw for the addition, as only
1693 // the offset is signed, while the base address is unsigned.
1694 Known = KnownBits::add(Known, IndexBits);
1695 } else {
1696 // If the index width is smaller than the pointer width, only add the
1697 // value to the low bits.
1698 assert(IndexWidth < BitWidth &&
1699 "Index width can't be larger than pointer width");
1700 Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
1701 }
1702 };
1703
1704 gep_type_iterator GTI = gep_type_begin(I);
1705 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1706 // TrailZ can only become smaller, short-circuit if we hit zero.
1707 if (Known.isUnknown())
1708 break;
1709
1710 Value *Index = I->getOperand(i);
1711
1712 // Handle case when index is zero.
1713 Constant *CIndex = dyn_cast<Constant>(Index);
1714 if (CIndex && CIndex->isZeroValue())
1715 continue;
1716
1717 if (StructType *STy = GTI.getStructTypeOrNull()) {
1718 // Handle struct member offset arithmetic.
1719
1720 assert(CIndex &&
1721 "Access to structure field must be known at compile time");
1722
1723 if (CIndex->getType()->isVectorTy())
1724 Index = CIndex->getSplatValue();
1725
1726 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1727 const StructLayout *SL = Q.DL.getStructLayout(STy);
1728 uint64_t Offset = SL->getElementOffset(Idx);
1729 AccConstIndices += Offset;
1730 continue;
1731 }
1732
1733 // Handle array index arithmetic.
1734 Type *IndexedTy = GTI.getIndexedType();
1735 if (!IndexedTy->isSized()) {
1736 Known.resetAll();
1737 break;
1738 }
1739
1740 TypeSize Stride = GTI.getSequentialElementStride(Q.DL);
1741 uint64_t StrideInBytes = Stride.getKnownMinValue();
1742 if (!Stride.isScalable()) {
1743 // Fast path for constant offset.
1744 if (auto *CI = dyn_cast<ConstantInt>(Index)) {
1745 AccConstIndices +=
1746 CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
1747 continue;
1748 }
1749 }
1750
1751 KnownBits IndexBits =
1752 computeKnownBits(Index, Q, Depth + 1).sextOrTrunc(IndexWidth);
1753 KnownBits ScalingFactor(IndexWidth);
1754 // Multiply by current sizeof type.
1755 // &A[i] == A + i * sizeof(*A[i]).
1756 if (Stride.isScalable()) {
1757 // For scalable types the only thing we know about sizeof is
1758 // that this is a multiple of the minimum size.
1759 ScalingFactor.Zero.setLowBits(llvm::countr_zero(StrideInBytes));
1760 } else {
1761 ScalingFactor =
1762 KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
1763 }
1764 AddIndexToKnown(KnownBits::mul(IndexBits, ScalingFactor));
1765 }
1766 if (!Known.isUnknown() && !AccConstIndices.isZero())
1767 AddIndexToKnown(KnownBits::makeConstant(AccConstIndices));
1768 break;
1769 }
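// e.g. for "getelementptr i32, ptr %p, i64 %i" every variable index contributes
// a multiple of 4 bytes, so two low bits of the offset are known zero; combined
// with the known alignment of %p this often proves low zero bits of the result.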
1770 case Instruction::PHI: {
1771 const PHINode *P = cast<PHINode>(I);
1772 BinaryOperator *BO = nullptr;
1773 Value *R = nullptr, *L = nullptr;
1774 if (matchSimpleRecurrence(P, BO, R, L)) {
1775 // Handle the case of a simple two-predecessor recurrence PHI.
1776 // There's a lot more that could theoretically be done here, but
1777 // this is sufficient to catch some interesting cases.
1778 unsigned Opcode = BO->getOpcode();
1779
1780 switch (Opcode) {
1781 // If this is a shift recurrence, we know the bits being shifted in. We
1782 // can combine that with information about the start value of the
1783 // recurrence to conclude facts about the result. If this is a udiv
1784 // recurrence, we know that the result can never exceed either the
1785 // numerator or the start value, whichever is greater.
1786 case Instruction::LShr:
1787 case Instruction::AShr:
1788 case Instruction::Shl:
1789 case Instruction::UDiv:
1790 if (BO->getOperand(0) != I)
1791 break;
1792 [[fallthrough]];
1793
1794 // For a urem recurrence, the result can never exceed the start value. The
1795 // phi could either be the numerator or the denominator.
1796 case Instruction::URem: {
1797 // We have matched a recurrence of the form:
1798 // %iv = [R, %entry], [%iv.next, %backedge]
1799 // %iv.next = shift_op %iv, L
1800
1801 // Recurse with the phi context to avoid concern about whether facts
1802 // inferred hold at original context instruction. TODO: It may be
1803 // correct to use the original context. IF warranted, explore and
1804 // add sufficient tests to cover.
1805 SimplifyQuery RecQ = Q.getWithoutCondContext();
1806 RecQ.CxtI = P;
1807 computeKnownBits(R, DemandedElts, Known2, RecQ, Depth + 1);
1808 switch (Opcode) {
1809 case Instruction::Shl:
1810 // A shl recurrence will only increase the trailing zeros
1811 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1812 break;
1813 case Instruction::LShr:
1814 case Instruction::UDiv:
1815 case Instruction::URem:
1816 // lshr, udiv, and urem recurrences will preserve the leading zeros of
1817 // the start value.
1818 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1819 break;
1820 case Instruction::AShr:
1821 // An ashr recurrence will extend the initial sign bit
1822 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1823 Known.One.setHighBits(Known2.countMinLeadingOnes());
1824 break;
1825 }
1826 break;
1827 }
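// e.g. for "%iv = phi i8 [ 8, %entry ], [ %iv.next, %loop ]" with
// "%iv.next = lshr i8 %iv, 1": the start value 8 has four leading zero bits
// and lshr never clears leading zeros, so %iv keeps those high zero bits on
// every iteration.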
1828
1829 // Check for operations that have the property that if
1830 // both their operands have low zero bits, the result
1831 // will have low zero bits.
1832 case Instruction::Add:
1833 case Instruction::Sub:
1834 case Instruction::And:
1835 case Instruction::Or:
1836 case Instruction::Mul: {
1837 // Change the context instruction to the "edge" that flows into the
1838 // phi. This is important because that is where the value is actually
1839 // "evaluated" even though it is used later somewhere else. (see also
1840 // D69571).
1841 SimplifyQuery RecQ = Q.getWithoutCondContext();
1842
1843 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1844 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1845 Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();
1846
1847 // Ok, we have a PHI of the form L op= R. Check for low
1848 // zero bits.
1849 RecQ.CxtI = RInst;
1850 computeKnownBits(R, DemandedElts, Known2, RecQ, Depth + 1);
1851
1852 // We need to take the minimum number of known bits
1853 KnownBits Known3(BitWidth);
1854 RecQ.CxtI = LInst;
1855 computeKnownBits(L, DemandedElts, Known3, RecQ, Depth + 1);
1856
1857 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1858 Known3.countMinTrailingZeros()));
1859
1860 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1861 if (!OverflowOp || !Q.IIQ.hasNoSignedWrap(OverflowOp))
1862 break;
1863
1864 switch (Opcode) {
1865 // If initial value of recurrence is nonnegative, and we are adding
1866 // a nonnegative number with nsw, the result can only be nonnegative
1867 // or poison value regardless of the number of times we execute the
1868 // add in phi recurrence. If initial value is negative and we are
1869 // adding a negative number with nsw, the result can only be
1870 // negative or poison value. Similar arguments apply to sub and mul.
1871 //
1872 // (add non-negative, non-negative) --> non-negative
1873 // (add negative, negative) --> negative
1874 case Instruction::Add: {
1875 if (Known2.isNonNegative() && Known3.isNonNegative())
1876 Known.makeNonNegative();
1877 else if (Known2.isNegative() && Known3.isNegative())
1878 Known.makeNegative();
1879 break;
1880 }
1881
1882 // (sub nsw non-negative, negative) --> non-negative
1883 // (sub nsw negative, non-negative) --> negative
1884 case Instruction::Sub: {
1885 if (BO->getOperand(0) != I)
1886 break;
1887 if (Known2.isNonNegative() && Known3.isNegative())
1888 Known.makeNonNegative();
1889 else if (Known2.isNegative() && Known3.isNonNegative())
1890 Known.makeNegative();
1891 break;
1892 }
1893
1894 // (mul nsw non-negative, non-negative) --> non-negative
1895 case Instruction::Mul:
1896 if (Known2.isNonNegative() && Known3.isNonNegative())
1897 Known.makeNonNegative();
1898 break;
1899
1900 default:
1901 break;
1902 }
1903 break;
1904 }
1905
1906 default:
1907 break;
1908 }
1909 }
1910
1911 // Unreachable blocks may have zero-operand PHI nodes.
1912 if (P->getNumIncomingValues() == 0)
1913 break;
1914
1915 // Otherwise take the unions of the known bit sets of the operands,
1916 // taking conservative care to avoid excessive recursion.
1917 if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) {
1918 // Skip if every incoming value refers back to the PHI itself.
1919 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1920 break;
1921
1922 Known.setAllConflict();
1923 for (const Use &U : P->operands()) {
1924 Value *IncValue;
1925 const PHINode *CxtPhi;
1926 Instruction *CxtI;
1927 breakSelfRecursivePHI(&U, P, IncValue, CxtI, &CxtPhi);
1928 // Skip direct self references.
1929 if (IncValue == P)
1930 continue;
1931
1932 // Change the context instruction to the "edge" that flows into the
1933 // phi. This is important because that is where the value is actually
1934 // "evaluated" even though it is used later somewhere else. (see also
1935 // D69571).
1936 SimplifyQuery RecQ = Q.getWithoutCondContext().getWithInstruction(CxtI);
1937
1938 Known2 = KnownBits(BitWidth);
1939
1940 // Recurse, but cap the recursion to one level, because we don't
1941 // want to waste time spinning around in loops.
1942 // TODO: See if we can base recursion limiter on number of incoming phi
1943 // edges so we don't overly clamp analysis.
1944 computeKnownBits(IncValue, DemandedElts, Known2, RecQ,
1945 MaxAnalysisRecursionDepth - 1);
1946
1947 // See if we can further use a conditional branch into the phi
1948 // to help us determine the range of the value.
1949 if (!Known2.isConstant()) {
1950 CmpPredicate Pred;
1951 const APInt *RHSC;
1952 BasicBlock *TrueSucc, *FalseSucc;
1953 // TODO: Use RHS Value and compute range from its known bits.
1954 if (match(RecQ.CxtI,
1955 m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
1956 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
1957 // Check for cases of duplicate successors.
1958 if ((TrueSucc == CxtPhi->getParent()) !=
1959 (FalseSucc == CxtPhi->getParent())) {
1960 // If we're using the false successor, invert the predicate.
1961 if (FalseSucc == CxtPhi->getParent())
1962 Pred = CmpInst::getInversePredicate(Pred);
1963 // Get the knownbits implied by the incoming phi condition.
1964 auto CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
1965 KnownBits KnownUnion = Known2.unionWith(CR.toKnownBits());
1966 // We can have conflicts here if we are analyzing dead code (it's
1967 // impossible for us to reach this BB based on the icmp).
1968 if (KnownUnion.hasConflict()) {
1969 // No reason to continue analyzing in a known dead region, so
1970 // just resetAll and break. This will cause us to also exit the
1971 // outer loop.
1972 Known.resetAll();
1973 break;
1974 }
1975 Known2 = KnownUnion;
1976 }
1977 }
1978 }
1979
1980 Known = Known.intersectWith(Known2);
1981 // If all bits have been ruled out, there's no need to check
1982 // more operands.
1983 if (Known.isUnknown())
1984 break;
1985 }
1986 }
1987 break;
1988 }
1989 case Instruction::Call:
1990 case Instruction::Invoke: {
1991 // If range metadata is attached to this call, set known bits from that,
1992 // and then intersect with known bits based on other properties of the
1993 // function.
1994 if (MDNode *MD =
1995 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1996 computeKnownBitsFromRangeMetadata(*MD, Known);
1997
1998 const auto *CB = cast<CallBase>(I);
1999
2000 if (std::optional<ConstantRange> Range = CB->getRange())
2001 Known = Known.unionWith(Range->toKnownBits());
2002
2003 if (const Value *RV = CB->getReturnedArgOperand()) {
2004 if (RV->getType() == I->getType()) {
2005 computeKnownBits(RV, Known2, Q, Depth + 1);
2006 Known = Known.unionWith(Known2);
2007 // If the function doesn't return properly for all input values
2008 // (e.g. unreachable exits) then there might be conflicts between the
2009 // argument value and the range metadata. Simply discard the known bits
2010 // in case of conflicts.
2011 if (Known.hasConflict())
2012 Known.resetAll();
2013 }
2014 }
2015 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2016 switch (II->getIntrinsicID()) {
2017 default:
2018 break;
2019 case Intrinsic::abs: {
2020 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2021 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
2022 Known = Known.unionWith(Known2.abs(IntMinIsPoison));
2023 break;
2024 }
2025 case Intrinsic::bitreverse:
2026 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2027 Known = Known.unionWith(Known2.reverseBits());
2028 break;
2029 case Intrinsic::bswap:
2030 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2031 Known = Known.unionWith(Known2.byteSwap());
2032 break;
2033 case Intrinsic::ctlz: {
2034 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2035 // If we have a known 1, its position is our upper bound.
2036 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2037 // If this call is poison for 0 input, the result will be less than 2^n.
2038 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
2039 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
2040 unsigned LowBits = llvm::bit_width(PossibleLZ);
2041 Known.Zero.setBitsFrom(LowBits);
2042 break;
2043 }
2044 case Intrinsic::cttz: {
2045 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2046 // If we have a known 1, its position is our upper bound.
2047 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2048 // If this call is poison for 0 input, the result will be less than 2^n.
2049 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
2050 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
2051 unsigned LowBits = llvm::bit_width(PossibleTZ);
2052 Known.Zero.setBitsFrom(LowBits);
2053 break;
2054 }
2055 case Intrinsic::ctpop: {
2056 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2057 // We can bound the space the count needs. Also, bits known to be zero
2058 // can't contribute to the population.
2059 unsigned BitsPossiblySet = Known2.countMaxPopulation();
2060 unsigned LowBits = llvm::bit_width(BitsPossiblySet);
2061 Known.Zero.setBitsFrom(LowBits);
2062 // TODO: we could bound KnownOne using the lower bound on the number
2063 // of bits which might be set provided by popcnt KnownOne2.
2064 break;
2065 }
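// e.g. if at most 5 bits of the operand can possibly be set, the population
// count is at most 5, which needs only bit_width(5) = 3 bits, so every result
// bit from bit 3 upwards is known zero.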
2066 case Intrinsic::fshr:
2067 case Intrinsic::fshl: {
2068 const APInt *SA;
2069 if (!match(I->getOperand(2), m_APInt(SA)))
2070 break;
2071
2072 // Normalize to funnel shift left.
2073 uint64_t ShiftAmt = SA->urem(BitWidth);
2074 if (II->getIntrinsicID() == Intrinsic::fshr)
2075 ShiftAmt = BitWidth - ShiftAmt;
2076
2077 KnownBits Known3(BitWidth);
2078 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Q, Depth + 1);
2079 computeKnownBits(I->getOperand(1), DemandedElts, Known3, Q, Depth + 1);
2080
2081 Known2 <<= ShiftAmt;
2082 Known3 >>= BitWidth - ShiftAmt;
2083 Known = Known2.unionWith(Known3);
2084 break;
2085 }
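// e.g. on i8, fshr(%a, %b, 3) is normalized to a left funnel shift by 5:
// the result is (%a << 5) | (%b >> 3), so known bits of %a supply the high
// five result bits and known bits of %b the low three.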
2086 case Intrinsic::uadd_sat:
2087 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2088 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2089 Known = KnownBits::uadd_sat(Known, Known2);
2090 break;
2091 case Intrinsic::usub_sat:
2092 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2093 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2094 Known = KnownBits::usub_sat(Known, Known2);
2095 break;
2096 case Intrinsic::sadd_sat:
2097 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2098 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2099 Known = KnownBits::sadd_sat(Known, Known2);
2100 break;
2101 case Intrinsic::ssub_sat:
2102 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2103 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2104 Known = KnownBits::ssub_sat(Known, Known2);
2105 break;
2106 // Vec reverse preserves bits from input vec.
2107 case Intrinsic::vector_reverse:
2108 computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known, Q,
2109 Depth + 1);
2110 break;
2111 // for min/max/and/or reduce, any bit common to each element in the
2112 // input vec is set in the output.
2113 case Intrinsic::vector_reduce_and:
2114 case Intrinsic::vector_reduce_or:
2115 case Intrinsic::vector_reduce_umax:
2116 case Intrinsic::vector_reduce_umin:
2117 case Intrinsic::vector_reduce_smax:
2118 case Intrinsic::vector_reduce_smin:
2119 computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
2120 break;
2121 case Intrinsic::vector_reduce_xor: {
2122 computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
2123 // The zeros common to all vecs are zero in the output.
2124 // If the number of elements is odd, then the common ones remain. If the
2125 // number of elements is even, then the common ones becomes zeros.
2126 auto *VecTy = cast<VectorType>(I->getOperand(0)->getType());
2127 // Even, so the ones become zeros.
2128 bool EvenCnt = VecTy->getElementCount().isKnownEven();
2129 if (EvenCnt)
2130 Known.Zero |= Known.One;
2131 // The element count may be even (known even, or scalable), so clear the ones.
2132 if (VecTy->isScalableTy() || EvenCnt)
2133 Known.One.clearAllBits();
2134 break;
2135 }
2136 case Intrinsic::umin:
2137 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2138 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2139 Known = KnownBits::umin(Known, Known2);
2140 break;
2141 case Intrinsic::umax:
2142 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2143 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2144 Known = KnownBits::umax(Known, Known2);
2145 break;
2146 case Intrinsic::smin:
2147 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2148 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2149 Known = KnownBits::smin(Known, Known2);
2150 unionWithMinMaxIntrinsicClamp(II, Known);
2151 break;
2152 case Intrinsic::smax:
2153 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2154 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2155 Known = KnownBits::smax(Known, Known2);
2156 unionWithMinMaxIntrinsicClamp(II, Known);
2157 break;
2158 case Intrinsic::ptrmask: {
2159 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2160
2161 const Value *Mask = I->getOperand(1);
2162 Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
2163 computeKnownBits(Mask, DemandedElts, Known2, Q, Depth + 1);
2164 // TODO: 1-extend would be more precise.
2165 Known &= Known2.anyextOrTrunc(BitWidth);
2166 break;
2167 }
2168 case Intrinsic::x86_sse2_pmulh_w:
2169 case Intrinsic::x86_avx2_pmulh_w:
2170 case Intrinsic::x86_avx512_pmulh_w_512:
2171 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2172 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2173 Known = KnownBits::mulhs(Known, Known2);
2174 break;
2175 case Intrinsic::x86_sse2_pmulhu_w:
2176 case Intrinsic::x86_avx2_pmulhu_w:
2177 case Intrinsic::x86_avx512_pmulhu_w_512:
2178 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth + 1);
2179 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Q, Depth + 1);
2180 Known = KnownBits::mulhu(Known, Known2);
2181 break;
2182 case Intrinsic::x86_sse42_crc32_64_64:
2183 Known.Zero.setBitsFrom(32);
2184 break;
2185 case Intrinsic::x86_ssse3_phadd_d_128:
2186 case Intrinsic::x86_ssse3_phadd_w_128:
2187 case Intrinsic::x86_avx2_phadd_d:
2188 case Intrinsic::x86_avx2_phadd_w: {
2189 Known = computeKnownBitsForHorizontalOperation(
2190 I, DemandedElts, Q, Depth,
2191 [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
2192 return KnownBits::add(KnownLHS, KnownRHS);
2193 });
2194 break;
2195 }
2196 case Intrinsic::x86_ssse3_phadd_sw_128:
2197 case Intrinsic::x86_avx2_phadd_sw: {
2198 Known = computeKnownBitsForHorizontalOperation(
2199 I, DemandedElts, Q, Depth, KnownBits::sadd_sat);
2200 break;
2201 }
2202 case Intrinsic::x86_ssse3_phsub_d_128:
2203 case Intrinsic::x86_ssse3_phsub_w_128:
2204 case Intrinsic::x86_avx2_phsub_d:
2205 case Intrinsic::x86_avx2_phsub_w: {
2206 Known = computeKnownBitsForHorizontalOperation(
2207 I, DemandedElts, Q, Depth,
2208 [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
2209 return KnownBits::sub(KnownLHS, KnownRHS);
2210 });
2211 break;
2212 }
2213 case Intrinsic::x86_ssse3_phsub_sw_128:
2214 case Intrinsic::x86_avx2_phsub_sw: {
2215 Known = computeKnownBitsForHorizontalOperation(
2216 I, DemandedElts, Q, Depth, KnownBits::ssub_sat);
2217 break;
2218 }
2219 case Intrinsic::riscv_vsetvli:
2220 case Intrinsic::riscv_vsetvlimax: {
2221 bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
2222 const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth);
2223 uint64_t SEW = RISCVVType::decodeVSEW(
2224 cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue());
2225 RISCVVType::VLMUL VLMUL = static_cast<RISCVVType::VLMUL>(
2226 cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue());
2227 uint64_t MaxVLEN =
2228 Range.getUnsignedMax().getZExtValue() * RISCV::RVVBitsPerBlock;
2229 uint64_t MaxVL = MaxVLEN / RISCVVType::getSEWLMULRatio(SEW, VLMUL);
2230
2231 // Result of vsetvli must not be larger than AVL.
2232 if (HasAVL)
2233 if (auto *CI = dyn_cast<ConstantInt>(II->getArgOperand(0)))
2234 MaxVL = std::min(MaxVL, CI->getZExtValue());
2235
2236 unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
2237 if (BitWidth > KnownZeroFirstBit)
2238 Known.Zero.setBitsFrom(KnownZeroFirstBit);
2239 break;
2240 }
2241 case Intrinsic::vscale: {
2242 if (!II->getParent() || !II->getFunction())
2243 break;
2244
2245 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits();
2246 break;
2247 }
2248 }
2249 }
2250 break;
2251 }
2252 case Instruction::ShuffleVector: {
2253 if (auto *Splat = getSplatValue(I)) {
2254 computeKnownBits(Splat, Known, Q, Depth + 1);
2255 break;
2256 }
2257
2258 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
2259 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
2260 if (!Shuf) {
2261 Known.resetAll();
2262 return;
2263 }
2264 // For undef elements, we don't know anything about the common state of
2265 // the shuffle result.
2266 APInt DemandedLHS, DemandedRHS;
2267 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
2268 Known.resetAll();
2269 return;
2270 }
2271 Known.setAllConflict();
2272 if (!!DemandedLHS) {
2273 const Value *LHS = Shuf->getOperand(0);
2274 computeKnownBits(LHS, DemandedLHS, Known, Q, Depth + 1);
2275 // If we don't know any bits, early out.
2276 if (Known.isUnknown())
2277 break;
2278 }
2279 if (!!DemandedRHS) {
2280 const Value *RHS = Shuf->getOperand(1);
2281 computeKnownBits(RHS, DemandedRHS, Known2, Q, Depth + 1);
2282 Known = Known.intersectWith(Known2);
2283 }
2284 break;
2285 }
2286 case Instruction::InsertElement: {
2287 if (isa<ScalableVectorType>(I->getType())) {
2288 Known.resetAll();
2289 return;
2290 }
2291 const Value *Vec = I->getOperand(0);
2292 const Value *Elt = I->getOperand(1);
2293 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
2294 unsigned NumElts = DemandedElts.getBitWidth();
2295 APInt DemandedVecElts = DemandedElts;
2296 bool NeedsElt = true;
2297 // If we know the index we are inserting to, clear it from the Vec check.
2298 if (CIdx && CIdx->getValue().ult(NumElts)) {
2299 DemandedVecElts.clearBit(CIdx->getZExtValue());
2300 NeedsElt = DemandedElts[CIdx->getZExtValue()];
2301 }
2302
2303 Known.setAllConflict();
2304 if (NeedsElt) {
2305 computeKnownBits(Elt, Known, Q, Depth + 1);
2306 // If we don't know any bits, early out.
2307 if (Known.isUnknown())
2308 break;
2309 }
2310
2311 if (!DemandedVecElts.isZero()) {
2312 computeKnownBits(Vec, DemandedVecElts, Known2, Q, Depth + 1);
2313 Known = Known.intersectWith(Known2);
2314 }
2315 break;
2316 }
2317 case Instruction::ExtractElement: {
2318 // Look through extract element. If the index is non-constant or
2319 // out-of-range demand all elements, otherwise just the extracted element.
2320 const Value *Vec = I->getOperand(0);
2321 const Value *Idx = I->getOperand(1);
2322 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2323 if (isa<ScalableVectorType>(Vec->getType())) {
2324 // FIXME: there's probably *something* we can do with scalable vectors
2325 Known.resetAll();
2326 break;
2327 }
2328 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
2329 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2330 if (CIdx && CIdx->getValue().ult(NumElts))
2331 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2332 computeKnownBits(Vec, DemandedVecElts, Known, Q, Depth + 1);
2333 break;
2334 }
2335 case Instruction::ExtractValue:
2336 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
2337 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
2338 if (EVI->getNumIndices() != 1) break;
2339 if (EVI->getIndices()[0] == 0) {
2340 switch (II->getIntrinsicID()) {
2341 default: break;
2342 case Intrinsic::uadd_with_overflow:
2343 case Intrinsic::sadd_with_overflow:
2344 computeKnownBitsAddSub(
2345 true, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false,
2346 /* NUW=*/false, DemandedElts, Known, Known2, Q, Depth);
2347 break;
2348 case Intrinsic::usub_with_overflow:
2349 case Intrinsic::ssub_with_overflow:
2350 computeKnownBitsAddSub(
2351 false, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false,
2352 /* NUW=*/false, DemandedElts, Known, Known2, Q, Depth);
2353 break;
2354 case Intrinsic::umul_with_overflow:
2355 case Intrinsic::smul_with_overflow:
2356 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
2357 false, DemandedElts, Known, Known2, Q, Depth);
2358 break;
2359 }
2360 }
2361 }
2362 break;
2363 case Instruction::Freeze:
2364 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
2365 Depth + 1))
2366 computeKnownBits(I->getOperand(0), Known, Q, Depth + 1);
2367 break;
2368 }
2369}
2370
2371/// Determine which bits of V are known to be either zero or one and return
2372/// them.
2373KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
2374 const SimplifyQuery &Q, unsigned Depth) {
2375 KnownBits Known(getBitWidth(V->getType(), Q.DL));
2376 ::computeKnownBits(V, DemandedElts, Known, Q, Depth);
2377 return Known;
2378}
2379
2380/// Determine which bits of V are known to be either zero or one and return
2381/// them.
2382 KnownBits llvm::computeKnownBits(const Value *V, const SimplifyQuery &Q,
2383 unsigned Depth) {
2384 KnownBits Known(getBitWidth(V->getType(), Q.DL));
2385 computeKnownBits(V, Known, Q, Depth);
2386 return Known;
2387}
2388
2389/// Determine which bits of V are known to be either zero or one and return
2390/// them in the Known bit set.
2391///
2392/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
2393/// we cannot optimize based on the assumption that it is zero without changing
2394/// it to be an explicit zero. If we don't change it to zero, other code could
2395 /// be optimized based on the contradictory assumption that it is non-zero.
2396/// Because instcombine aggressively folds operations with undef args anyway,
2397/// this won't lose us code quality.
2398///
2399/// This function is defined on values with integer type, values with pointer
2400/// type, and vectors of integers. In the case
2401/// where V is a vector, known zero, and known one values are the
2402/// same width as the vector element, and the bit is set only if it is true
2403/// for all of the demanded elements in the vector specified by DemandedElts.
2404void computeKnownBits(const Value *V, const APInt &DemandedElts,
2405 KnownBits &Known, const SimplifyQuery &Q,
2406 unsigned Depth) {
2407 if (!DemandedElts) {
2408 // No demanded elts, better to assume we don't know anything.
2409 Known.resetAll();
2410 return;
2411 }
2412
2413 assert(V && "No Value?");
2414 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2415
2416#ifndef NDEBUG
2417 Type *Ty = V->getType();
2418 unsigned BitWidth = Known.getBitWidth();
2419
2420 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
2421 "Not integer or pointer type!");
2422
2423 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2424 assert(
2425 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2426 "DemandedElt width should equal the fixed vector number of elements");
2427 } else {
2428 assert(DemandedElts == APInt(1, 1) &&
2429 "DemandedElt width should be 1 for scalars or scalable vectors");
2430 }
2431
2432 Type *ScalarTy = Ty->getScalarType();
2433 if (ScalarTy->isPointerTy()) {
2434 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
2435 "V and Known should have same BitWidth");
2436 } else {
2437 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
2438 "V and Known should have same BitWidth");
2439 }
2440#endif
2441
2442 const APInt *C;
2443 if (match(V, m_APInt(C))) {
2444 // We know all of the bits for a scalar constant or a splat vector constant!
2445 Known = KnownBits::makeConstant(*C);
2446 return;
2447 }
2448 // Null and aggregate-zero are all-zeros.
2449 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
2450 Known.setAllZero();
2451 return;
2452 }
2453 // Handle a constant vector by taking the intersection of the known bits of
2454 // each element.
2455 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
2456 assert(!isa<ScalableVectorType>(V->getType()));
2457 // We know that CDV must be a vector of integers. Take the intersection of
2458 // each element.
2459 Known.setAllConflict();
2460 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2461 if (!DemandedElts[i])
2462 continue;
2463 APInt Elt = CDV->getElementAsAPInt(i);
2464 Known.Zero &= ~Elt;
2465 Known.One &= Elt;
2466 }
2467 if (Known.hasConflict())
2468 Known.resetAll();
2469 return;
2470 }
2471
2472 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
2473 assert(!isa<ScalableVectorType>(V->getType()));
2474 // We know that CV must be a vector of integers. Take the intersection of
2475 // each element.
2476 Known.setAllConflict();
2477 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2478 if (!DemandedElts[i])
2479 continue;
2480 Constant *Element = CV->getAggregateElement(i);
2481 if (isa<PoisonValue>(Element))
2482 continue;
2483 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
2484 if (!ElementCI) {
2485 Known.resetAll();
2486 return;
2487 }
2488 const APInt &Elt = ElementCI->getValue();
2489 Known.Zero &= ~Elt;
2490 Known.One &= Elt;
2491 }
2492 if (Known.hasConflict())
2493 Known.resetAll();
2494 return;
2495 }
2496
2497 // Start out not knowing anything.
2498 Known.resetAll();
2499
2500 // We can't imply anything about undefs.
2501 if (isa<UndefValue>(V))
2502 return;
2503
2504 // There's no point in looking through other users of ConstantData for
2505 // assumptions. Confirm that we've handled them all.
2506 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2507
2508 if (const auto *A = dyn_cast<Argument>(V))
2509 if (std::optional<ConstantRange> Range = A->getRange())
2510 Known = Range->toKnownBits();
2511
2512 // All recursive calls that increase depth must come after this.
2513 if (Depth == MaxAnalysisRecursionDepth)
2514 return;
2515
2516 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2517 // the bits of its aliasee.
2518 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2519 if (!GA->isInterposable())
2520 computeKnownBits(GA->getAliasee(), Known, Q, Depth + 1);
2521 return;
2522 }
2523
2524 if (const Operator *I = dyn_cast<Operator>(V))
2525 computeKnownBitsFromOperator(I, DemandedElts, Known, Q, Depth);
2526 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2527 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
2528 Known = CR->toKnownBits();
2529 }
2530
2531 // Aligned pointers have trailing zeros - refine Known.Zero set
2532 if (isa<PointerType>(V->getType())) {
2533 Align Alignment = V->getPointerAlignment(Q.DL);
2534 Known.Zero.setLowBits(Log2(Alignment));
2535 }
2536
2537 // computeKnownBitsFromContext strictly refines Known.
2538 // Therefore, we run them after computeKnownBitsFromOperator.
2539
2540 // Check whether we can determine known bits from context such as assumes.
2541 computeKnownBitsFromContext(V, Known, Q, Depth);
2542}
2543
2544/// Try to detect a recurrence that the value of the induction variable is
2545/// always a power of two (or zero).
2546static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2547 SimplifyQuery &Q, unsigned Depth) {
2548 BinaryOperator *BO = nullptr;
2549 Value *Start = nullptr, *Step = nullptr;
2550 if (!matchSimpleRecurrence(PN, BO, Start, Step))
2551 return false;
2552
2553 // Initial value must be a power of two.
2554 for (const Use &U : PN->operands()) {
2555 if (U.get() == Start) {
2556 // Initial value comes from a different BB, need to adjust context
2557 // instruction for analysis.
2558 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2559 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Q, Depth))
2560 return false;
2561 }
2562 }
2563
2564 // Except for Mul, the induction variable must be on the left side of the
2565 // increment expression, otherwise its value can be arbitrary.
2566 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2567 return false;
2568
2569 Q.CxtI = BO->getParent()->getTerminator();
2570 switch (BO->getOpcode()) {
2571 case Instruction::Mul:
2572 // Power of two is closed under multiplication.
2573 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2574 Q.IIQ.hasNoSignedWrap(BO)) &&
2575 isKnownToBeAPowerOfTwo(Step, OrZero, Q, Depth);
2576 case Instruction::SDiv:
2577 // Start value must not be signmask for signed division, so simply being a
2578 // power of two is not sufficient, and it has to be a constant.
2579 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2580 return false;
2581 [[fallthrough]];
2582 case Instruction::UDiv:
2583 // Divisor must be a power of two.
2584 // If OrZero is false, cannot guarantee induction variable is non-zero after
2585 // division, same for Shr, unless it is exact division.
2586 return (OrZero || Q.IIQ.isExact(BO)) &&
2587 isKnownToBeAPowerOfTwo(Step, false, Q, Depth);
2588 case Instruction::Shl:
2589 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2590 case Instruction::AShr:
2591 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2592 return false;
2593 [[fallthrough]];
2594 case Instruction::LShr:
2595 return OrZero || Q.IIQ.isExact(BO);
2596 default:
2597 return false;
2598 }
2599}
2600
2601/// Return true if we can infer that \p V is known to be a power of 2 from
2602/// dominating condition \p Cond (e.g., ctpop(V) == 1).
2603static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero,
2604 const Value *Cond,
2605 bool CondIsTrue) {
2606 CmpPredicate Pred;
2607 const APInt *RHSC;
2608 if (!match(Cond, m_ICmp(Pred, m_Intrinsic<Intrinsic::ctpop>(m_Specific(V)),
2609 m_APInt(RHSC))))
2610 return false;
2611 if (!CondIsTrue)
2612 Pred = ICmpInst::getInversePredicate(Pred);
2613 // ctpop(V) u< 2
2614 if (OrZero && Pred == ICmpInst::ICMP_ULT && *RHSC == 2)
2615 return true;
2616 // ctpop(V) == 1
2617 return Pred == ICmpInst::ICMP_EQ && *RHSC == 1;
2618}
2619
2620/// Return true if the given value is known to have exactly one
2621/// bit set when defined. For vectors return true if every element is known to
2622/// be a power of two when defined. Supports values with integer or pointer
2623/// types and vectors of integers.
2624bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero,
2625 const SimplifyQuery &Q, unsigned Depth) {
2626 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2627
2628 if (isa<Constant>(V))
2629 return OrZero ? match(V, m_Power2OrZero()) : match(V, m_Power2());
2630
2631 // i1 is by definition a power of 2 or zero.
2632 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2633 return true;
2634
2635 // Try to infer from assumptions.
2636 if (Q.AC && Q.CxtI) {
2637 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
2638 if (!AssumeVH)
2639 continue;
2640 CallInst *I = cast<CallInst>(AssumeVH);
2641 if (isImpliedToBeAPowerOfTwoFromCond(V, OrZero, I->getArgOperand(0),
2642 /*CondIsTrue=*/true) &&
2643 isValidAssumeForContext(I, Q.CxtI, Q.DT))
2644 return true;
2645 }
2646 }
2647
2648 // Handle dominating conditions.
2649 if (Q.DC && Q.CxtI && Q.DT) {
2650 for (BranchInst *BI : Q.DC->conditionsFor(V)) {
2651 Value *Cond = BI->getCondition();
2652
2653 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
2654 if (isImpliedToBeAPowerOfTwoFromCond(V, OrZero, Cond,
2655 /*CondIsTrue=*/true) &&
2656 Q.DT->dominates(Edge0, Q.CxtI->getParent()))
2657 return true;
2658
2659 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
2660 if (isImpliedToBeAPowerOfTwoFromCond(V, OrZero, Cond,
2661 /*CondIsTrue=*/false) &&
2662 Q.DT->dominates(Edge1, Q.CxtI->getParent()))
2663 return true;
2664 }
2665 }
2666
2667 auto *I = dyn_cast<Instruction>(V);
2668 if (!I)
2669 return false;
2670
2671 if (Q.CxtI && match(V, m_VScale())) {
2672 const Function *F = Q.CxtI->getFunction();
2673 // The vscale_range indicates vscale is a power-of-two.
2674 return F->hasFnAttribute(Attribute::VScaleRange);
2675 }
2676
2677 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2678 // it is shifted off the end then the result is undefined.
2679 if (match(I, m_Shl(m_One(), m_Value())))
2680 return true;
2681
2682 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2683 // the bottom. If it is shifted off the bottom then the result is undefined.
2684 if (match(I, m_LShr(m_SignMask(), m_Value())))
2685 return true;
2686
2687 // The remaining tests are all recursive, so bail out if we hit the limit.
2688 if (Depth++ == MaxAnalysisRecursionDepth)
2689 return false;
2690
2691 switch (I->getOpcode()) {
2692 case Instruction::ZExt:
2693 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
2694 case Instruction::Trunc:
2695 return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
2696 case Instruction::Shl:
2697 if (OrZero || Q.IIQ.hasNoUnsignedWrap(I) || Q.IIQ.hasNoSignedWrap(I))
2698 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
2699 return false;
2700 case Instruction::LShr:
2701 if (OrZero || Q.IIQ.isExact(cast<BinaryOperator>(I)))
2702 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
2703 return false;
2704 case Instruction::UDiv:
2705 if (Q.IIQ.isExact(cast<BinaryOperator>(I)))
2706 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth);
2707 return false;
2708 case Instruction::Mul:
2709 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Q, Depth) &&
2710 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth) &&
2711 (OrZero || isKnownNonZero(I, Q, Depth));
2712 case Instruction::And:
2713 // A power of two and'd with anything is a power of two or zero.
2714 if (OrZero &&
2715 (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Q, Depth) ||
2716 isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Q, Depth)))
2717 return true;
2718 // X & (-X) is always a power of two or zero.
2719 if (match(I->getOperand(0), m_Neg(m_Specific(I->getOperand(1)))) ||
2720 match(I->getOperand(1), m_Neg(m_Specific(I->getOperand(0)))))
2721 return OrZero || isKnownNonZero(I->getOperand(0), Q, Depth);
2722 return false;
2723 case Instruction::Add: {
2724 // Adding a power-of-two or zero to the same power-of-two or zero yields
2725 // either the original power-of-two, a larger power-of-two or zero.
2726 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2727 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2728 Q.IIQ.hasNoSignedWrap(VOBO)) {
2729 if (match(I->getOperand(0),
2730 m_c_And(m_Specific(I->getOperand(1)), m_Value())) &&
2731 isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Q, Depth))
2732 return true;
2733 if (match(I->getOperand(1),
2734 m_c_And(m_Specific(I->getOperand(0)), m_Value())) &&
2735 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Q, Depth))
2736 return true;
2737
2738 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2739 KnownBits LHSBits(BitWidth);
2740 computeKnownBits(I->getOperand(0), LHSBits, Q, Depth);
2741
2742 KnownBits RHSBits(BitWidth);
2743 computeKnownBits(I->getOperand(1), RHSBits, Q, Depth);
2744 // If i8 V is a power of two or zero:
2745 // ZeroBits: 1 1 1 0 1 1 1 1
2746 // ~ZeroBits: 0 0 0 1 0 0 0 0
2747 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2748 // If OrZero isn't set, we cannot give back a zero result.
2749 // Make sure either the LHS or RHS has a bit set.
2750 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2751 return true;
2752 }
2753
2754 // LShr(UINT_MAX, Y) + 1 is a power of two (if add is nuw) or zero.
2755 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO))
2756 if (match(I, m_Add(m_LShr(m_AllOnes(), m_Value()), m_One())))
2757 return true;
2758 return false;
2759 }
2760 case Instruction::Select:
2761 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Q, Depth) &&
2762 isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Q, Depth);
2763 case Instruction::PHI: {
2764 // A PHI node is power of two if all incoming values are power of two, or if
2765 // it is an induction variable where in each step its value is a power of
2766 // two.
2767 auto *PN = cast<PHINode>(I);
2768 SimplifyQuery RecQ = Q.getWithoutCondContext();
2769
2770 // Check if it is an induction variable and always power of two.
2771 if (isPowerOfTwoRecurrence(PN, OrZero, RecQ, Depth))
2772 return true;
2773
2774 // Recursively check all incoming values. Limit recursion to 2 levels, so
2775 // that search complexity is limited to number of operands^2.
2776 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2777 return llvm::all_of(PN->operands(), [&](const Use &U) {
2778 // Value is power of 2 if it is coming from PHI node itself by induction.
2779 if (U.get() == PN)
2780 return true;
2781
2782 // Change the context instruction to the incoming block where it is
2783 // evaluated.
2784 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2785 return isKnownToBeAPowerOfTwo(U.get(), OrZero, RecQ, NewDepth);
2786 });
2787 }
2788 case Instruction::Invoke:
2789 case Instruction::Call: {
2790 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2791 switch (II->getIntrinsicID()) {
2792 case Intrinsic::umax:
2793 case Intrinsic::smax:
2794 case Intrinsic::umin:
2795 case Intrinsic::smin:
2796 return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Q, Depth) &&
2797 isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Q, Depth);
2798 // bswap/bitreverse just move around bits, but don't change any 1s/0s
2799 // thus don't change pow2/non-pow2 status.
2800 case Intrinsic::bitreverse:
2801 case Intrinsic::bswap:
2802 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Q, Depth);
2803 case Intrinsic::fshr:
2804 case Intrinsic::fshl:
2805 // If Op0 == Op1, this is a rotate. is_pow2(rotate(x, y)) == is_pow2(x)
2806 if (II->getArgOperand(0) == II->getArgOperand(1))
2807 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Q, Depth);
2808 break;
2809 default:
2810 break;
2811 }
2812 }
2813 return false;
2814 }
2815 default:
2816 return false;
2817 }
2818}
2819
2820/// Test whether a GEP's result is known to be non-null.
2821///
2822/// Uses properties inherent in a GEP to try to determine whether it is known
2823/// to be non-null.
2824///
2825/// Currently this routine does not support vector GEPs.
2826static bool isGEPKnownNonNull(const GEPOperator *GEP, const SimplifyQuery &Q,
2827 unsigned Depth) {
2828 const Function *F = nullptr;
2829 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2830 F = I->getFunction();
2831
2832 // If the gep is nuw or inbounds with invalid null pointer, then the GEP
2833 // may be null iff the base pointer is null and the offset is zero.
2834 if (!GEP->hasNoUnsignedWrap() &&
2835 !(GEP->isInBounds() &&
2836 !NullPointerIsDefined(F, GEP->getPointerAddressSpace())))
2837 return false;
2838
2839 // FIXME: Support vector-GEPs.
2840 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2841
2842 // If the base pointer is non-null, we cannot walk to a null address with an
2843 // inbounds GEP in address space zero.
2844 if (isKnownNonZero(GEP->getPointerOperand(), Q, Depth))
2845 return true;
2846
2847 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2848 // If so, then the GEP cannot produce a null pointer, as doing so would
2849 // inherently violate the inbounds contract within address space zero.
2850 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2851 GTI != GTE; ++GTI) {
2852 // Struct types are easy -- they must always be indexed by a constant.
2853 if (StructType *STy = GTI.getStructTypeOrNull()) {
2854 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2855 unsigned ElementIdx = OpC->getZExtValue();
2856 const StructLayout *SL = Q.DL.getStructLayout(STy);
2857 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2858 if (ElementOffset > 0)
2859 return true;
2860 continue;
2861 }
2862
2863 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2864 if (GTI.getSequentialElementStride(Q.DL).isZero())
2865 continue;
2866
2867 // Fast path the constant operand case both for efficiency and so we don't
2868 // increment Depth when just zipping down an all-constant GEP.
2869 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2870 if (!OpC->isZero())
2871 return true;
2872 continue;
2873 }
2874
2875 // We post-increment Depth here because while isKnownNonZero increments it
2876 // as well, when we pop back up that increment won't persist. We don't want
2877 // to recurse 10k times just because we have 10k GEP operands. We don't
2878 // bail completely out because we want to handle constant GEPs regardless
2879 // of depth.
2880 if (Depth++ >= MaxAnalysisRecursionDepth)
2881 continue;
2882
2883 if (isKnownNonZero(GTI.getOperand(), Q, Depth))
2884 return true;
2885 }
2886
2887 return false;
2888}
2889
2890 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2891 const Instruction *CtxI,
2892 const DominatorTree *DT) {
2893 assert(!isa<Constant>(V) && "Called for constant?");
2894
2895 if (!CtxI || !DT)
2896 return false;
2897
2898 unsigned NumUsesExplored = 0;
2899 for (auto &U : V->uses()) {
2900 // Avoid massive lists
2901 if (NumUsesExplored >= DomConditionsMaxUses)
2902 break;
2903 NumUsesExplored++;
2904
2905 const Instruction *UI = cast<Instruction>(U.getUser());
2906 // If the value is used as an argument to a call or invoke, then argument
2907 // attributes may provide an answer about null-ness.
2908 if (V->getType()->isPointerTy()) {
2909 if (const auto *CB = dyn_cast<CallBase>(UI)) {
2910 if (CB->isArgOperand(&U) &&
2911 CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
2912 /*AllowUndefOrPoison=*/false) &&
2913 DT->dominates(CB, CtxI))
2914 return true;
2915 }
2916 }
2917
2918 // If the value is used as a load/store, then the pointer must be non null.
2919 if (V == getLoadStorePointerOperand(UI)) {
2920 if (!NullPointerIsDefined(UI->getFunction(),
2921 V->getType()->getPointerAddressSpace()) &&
2922 DT->dominates(UI, CtxI))
2923 return true;
2924 }
2925
2926 if ((match(UI, m_IDiv(m_Value(), m_Specific(V))) ||
2927 match(UI, m_IRem(m_Value(), m_Specific(V)))) &&
2928 isValidAssumeForContext(UI, CtxI, DT))
2929 return true;
2930
2931 // Consider only compare instructions uniquely controlling a branch
2932 Value *RHS;
2933 CmpPredicate Pred;
2934 if (!match(UI, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2935 continue;
2936
2937 bool NonNullIfTrue;
2938 if (cmpExcludesZero(Pred, RHS))
2939 NonNullIfTrue = true;
2940 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2941 NonNullIfTrue = false;
2942 else
2943 continue;
2944
2945 SmallVector<const User *, 4> WorkList;
2946 SmallPtrSet<const User *, 4> Visited;
2947 for (const auto *CmpU : UI->users()) {
2948 assert(WorkList.empty() && "Should be!");
2949 if (Visited.insert(CmpU).second)
2950 WorkList.push_back(CmpU);
2951
2952 while (!WorkList.empty()) {
2953 auto *Curr = WorkList.pop_back_val();
2954
2955 // If a user is an AND, add all its users to the work list. We only
2956 // propagate "pred != null" condition through AND because it is only
2957 // correct to assume that all conditions of AND are met in true branch.
2958 // TODO: Support similar logic of OR and EQ predicate?
2959 if (NonNullIfTrue)
2960 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2961 for (const auto *CurrU : Curr->users())
2962 if (Visited.insert(CurrU).second)
2963 WorkList.push_back(CurrU);
2964 continue;
2965 }
2966
2967 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2968 assert(BI->isConditional() && "uses a comparison!");
2969
2970 BasicBlock *NonNullSuccessor =
2971 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2972 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2973 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2974 return true;
2975 } else if (NonNullIfTrue && isGuard(Curr) &&
2976 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2977 return true;
2978 }
2979 }
2980 }
2981 }
2982
2983 return false;
2984}
2985
2986/// Does the 'Range' metadata (which must be a valid MD_range operand list)
2987/// ensure that the value it's attached to is never Value? 'RangeType' is
2988 /// the type of the value described by the range.
2989static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2990 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2991 assert(NumRanges >= 1);
2992 for (unsigned i = 0; i < NumRanges; ++i) {
2993 ConstantInt *Lower =
2994 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2995 ConstantInt *Upper =
2996 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2997 ConstantRange Range(Lower->getValue(), Upper->getValue());
2998 if (Range.contains(Value))
2999 return false;
3000 }
3001 return true;
3002}
3003
3004/// Try to detect a recurrence that monotonically increases/decreases from a
3005/// non-zero starting value. These are common as induction variables.
3006static bool isNonZeroRecurrence(const PHINode *PN) {
3007 BinaryOperator *BO = nullptr;
3008 Value *Start = nullptr, *Step = nullptr;
3009 const APInt *StartC, *StepC;
3010 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
3011 !match(Start, m_APInt(StartC)) || StartC->isZero())
3012 return false;
3013
3014 switch (BO->getOpcode()) {
3015 case Instruction::Add:
3016 // Starting from non-zero and stepping away from zero can never wrap back
3017 // to zero.
3018 return BO->hasNoUnsignedWrap() ||
3019 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
3020 StartC->isNegative() == StepC->isNegative());
3021 case Instruction::Mul:
3022 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
3023 match(Step, m_APInt(StepC)) && !StepC->isZero();
3024 case Instruction::Shl:
3025 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
3026 case Instruction::AShr:
3027 case Instruction::LShr:
3028 return BO->isExact();
3029 default:
3030 return false;
3031 }
3032}
3033
3034static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {
3035 return match(Op0, m_ZExtOrSExt(m_SpecificICmp(ICmpInst::ICMP_EQ,
3036 m_Specific(Op1), m_Zero()))) ||
3037 match(Op1, m_ZExtOrSExt(m_SpecificICmp(ICmpInst::ICMP_EQ,
3038 m_Specific(Op0), m_Zero())));
3039}
3040
3041static bool isNonZeroAdd(const APInt &DemandedElts, const SimplifyQuery &Q,
3042 unsigned BitWidth, Value *X, Value *Y, bool NSW,
3043 bool NUW, unsigned Depth) {
3044 // (X + (X != 0)) is non zero
3045 if (matchOpWithOpEqZero(X, Y))
3046 return true;
3047
3048 if (NUW)
3049 return isKnownNonZero(Y, DemandedElts, Q, Depth) ||
3050 isKnownNonZero(X, DemandedElts, Q, Depth);
3051
3052 KnownBits XKnown = computeKnownBits(X, DemandedElts, Q, Depth);
3053 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Q, Depth);
3054
3055 // If X and Y are both non-negative (as signed values) then their sum is not
3056 // zero unless both X and Y are zero.
3057 if (XKnown.isNonNegative() && YKnown.isNonNegative())
3058 if (isKnownNonZero(Y, DemandedElts, Q, Depth) ||
3059 isKnownNonZero(X, DemandedElts, Q, Depth))
3060 return true;
3061
3062 // If X and Y are both negative (as signed values) then their sum is not
3063 // zero unless both X and Y equal INT_MIN.
3064 if (XKnown.isNegative() && YKnown.isNegative()) {
3065 APInt Mask = APInt::getSignedMaxValue(BitWidth);
3066 // The sign bit of X is set. If some other bit is set then X is not equal
3067 // to INT_MIN.
3068 if (XKnown.One.intersects(Mask))
3069 return true;
3070 // The sign bit of Y is set. If some other bit is set then Y is not equal
3071 // to INT_MIN.
3072 if (YKnown.One.intersects(Mask))
3073 return true;
3074 }
3075
3076 // The sum of a non-negative number and a power of two is not zero.
3077 if (XKnown.isNonNegative() &&
3078 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Q, Depth))
3079 return true;
3080 if (YKnown.isNonNegative() &&
3081 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Q, Depth))
3082 return true;
3083
3084 return KnownBits::add(XKnown, YKnown, NSW, NUW).isNonZero();
3085}
3086
3087static bool isNonZeroSub(const APInt &DemandedElts, const SimplifyQuery &Q,
3088 unsigned BitWidth, Value *X, Value *Y,
3089 unsigned Depth) {
3090 // (X - (X != 0)) is non zero
3091 // ((X != 0) - X) is non zero
3092 if (matchOpWithOpEqZero(X, Y))
3093 return true;
3094
3095 // TODO: Move this case into isKnownNonEqual().
3096 if (auto *C = dyn_cast<Constant>(X))
3097 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth))
3098 return true;
3099
3100 return ::isKnownNonEqual(X, Y, DemandedElts, Q, Depth);
3101}
3102
3103static bool isNonZeroMul(const APInt &DemandedElts, const SimplifyQuery &Q,
3104 unsigned BitWidth, Value *X, Value *Y, bool NSW,
3105 bool NUW, unsigned Depth) {
3106 // If X and Y are non-zero then so is X * Y as long as the multiplication
3107 // does not overflow.
3108 if (NSW || NUW)
3109 return isKnownNonZero(X, DemandedElts, Q, Depth) &&
3110 isKnownNonZero(Y, DemandedElts, Q, Depth);
3111
3112 // If either X or Y is odd, then if the other is non-zero the result can't
3113 // be zero.
3114 KnownBits XKnown = computeKnownBits(X, DemandedElts, Q, Depth);
3115 if (XKnown.One[0])
3116 return isKnownNonZero(Y, DemandedElts, Q, Depth);
3117
3118 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Q, Depth);
3119 if (YKnown.One[0])
3120 return XKnown.isNonZero() || isKnownNonZero(X, DemandedElts, Q, Depth);
3121
3122 // If there exists any subset of X (sX) and subset of Y (sY) s.t sX * sY is
3123 // non-zero, then X * Y is non-zero. We can find sX and sY by just taking
3124 // the lowest known One of X and Y. If they are non-zero, the result
3125 // must be non-zero. We can check if LSB(X) * LSB(Y) != 0 by doing
3126 // X.CountTrailingZeros + Y.CountTrailingZeros < BitWidth.
3127 return (XKnown.countMaxTrailingZeros() + YKnown.countMaxTrailingZeros()) <
3128 BitWidth;
3129}
3130
3131static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
3132 const SimplifyQuery &Q, const KnownBits &KnownVal,
3133 unsigned Depth) {
3134 auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
3135 switch (I->getOpcode()) {
3136 case Instruction::Shl:
3137 return Lhs.shl(Rhs);
3138 case Instruction::LShr:
3139 return Lhs.lshr(Rhs);
3140 case Instruction::AShr:
3141 return Lhs.ashr(Rhs);
3142 default:
3143 llvm_unreachable("Unknown Shift Opcode");
3144 }
3145 };
3146
3147 auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
3148 switch (I->getOpcode()) {
3149 case Instruction::Shl:
3150 return Lhs.lshr(Rhs);
3151 case Instruction::LShr:
3152 case Instruction::AShr:
3153 return Lhs.shl(Rhs);
3154 default:
3155 llvm_unreachable("Unknown Shift Opcode");
3156 }
3157 };
3158
3159 if (KnownVal.isUnknown())
3160 return false;
3161
3162 KnownBits KnownCnt =
3163 computeKnownBits(I->getOperand(1), DemandedElts, Q, Depth);
3164 APInt MaxShift = KnownCnt.getMaxValue();
3165 unsigned NumBits = KnownVal.getBitWidth();
3166 if (MaxShift.uge(NumBits))
3167 return false;
3168
3169 if (!ShiftOp(KnownVal.One, MaxShift).isZero())
3170 return true;
3171
3172 // If all of the bits shifted out are known to be zero, and Val is known
3173 // non-zero then at least one non-zero bit must remain.
3174 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
3175 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) &&
3176 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth))
3177 return true;
3178
3179 return false;
3180}
3181
3182 static bool isKnownNonZeroFromOperator(const Operator *I,
3183 const APInt &DemandedElts,
3184 const SimplifyQuery &Q, unsigned Depth) {
3185 unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL);
3186 switch (I->getOpcode()) {
3187 case Instruction::Alloca:
3188 // Alloca never returns null, malloc might.
3189 return I->getType()->getPointerAddressSpace() == 0;
3190 case Instruction::GetElementPtr:
3191 if (I->getType()->isPointerTy())
3192 return isGEPKnownNonNull(cast<GEPOperator>(I), Q, Depth);
3193 break;
3194 case Instruction::BitCast: {
3195 // We need to be a bit careful here. We can only peek through the bitcast
3196 // if the scalar size of the operand's elements is smaller than, and divides
3197 // evenly into, the size they are being cast to. Take three cases:
3198 //
3199 // 1) Unsafe:
3200 // bitcast <2 x i16> %NonZero to <4 x i8>
3201 //
3202 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a
3203 // <4 x i8> requires that all 4 i8 elements be non-zero which isn't
3204 // guaranteed (imagine just the sign bit set in the 2 i16 elements).
3205 //
3206 // 2) Unsafe:
3207 // bitcast <4 x i3> %NonZero to <3 x i4>
3208 //
3209 // Even though the scalar size of the src (`i3`) is smaller than the
3210 // scalar size of the dst `i4`, because `i4` is not a multiple of `i3`
3211 // it's possible for the `3 x i4` elements to be zero because there are
3212 // some elements in the destination that don't contain any full src
3213 // element.
3214 //
3215 // 3) Safe:
3216 // bitcast <4 x i8> %NonZero to <2 x i16>
3217 //
3218 // This is always safe as non-zero in the 4 i8 elements implies
3219 // non-zero in the combination of any two adjacent ones. Since i16 is a
3220 // multiple of i8, each i16 is guaranteed to contain 2 full i8 elements.
3221 // This all implies the 2 i16 elements are non-zero.
3222 Type *FromTy = I->getOperand(0)->getType();
3223 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) &&
3224 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0)
3225 return isKnownNonZero(I->getOperand(0), Q, Depth);
3226 } break;
3227 case Instruction::IntToPtr:
3228 // Note that we have to take special care to avoid looking through
3229 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
3230 // as casts that can alter the value, e.g., AddrSpaceCasts.
3231 if (!isa<ScalableVectorType>(I->getType()) &&
3232 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
3233 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
3234 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3235 break;
3236 case Instruction::PtrToAddr:
3237 // isKnownNonZero() for pointers refers to the address bits being non-zero,
3238 // so we can directly forward.
3239 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3240 case Instruction::PtrToInt:
3241 // For ptrtoint, make sure the result size is >= the address size. If the
3242 // address is non-zero, any larger value is also non-zero.
3243 if (Q.DL.getAddressSizeInBits(I->getOperand(0)->getType()) <=
3244 I->getType()->getScalarSizeInBits())
3245 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3246 break;
3247 case Instruction::Trunc:
3248 // nuw/nsw trunc preserves zero/non-zero status of input.
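 // (For example, a trunc nuw i32 %x to i8 can only produce 0 when %x is 0:
 // if any dropped high bit of a non-zero %x were set, the trunc would be
 // poison rather than 0.)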
3249 if (auto *TI = dyn_cast<TruncInst>(I))
3250 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
3251 return isKnownNonZero(TI->getOperand(0), DemandedElts, Q, Depth);
3252 break;
3253
3254 // x ^ y != 0 exactly when x - y != 0 (both are equivalent to x != y),
3255 // so we can use the same checks for both.
3256 case Instruction::Xor:
3257 case Instruction::Sub:
3258 return isNonZeroSub(DemandedElts, Q, BitWidth, I->getOperand(0),
3259 I->getOperand(1), Depth);
3260 case Instruction::Or:
3261 // (X | (X == 0)) is non zero
3262 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
3263 return true;
3264 // X | Y != 0 if X != Y.
3265 if (isKnownNonEqual(I->getOperand(0), I->getOperand(1), DemandedElts, Q,
3266 Depth))
3267 return true;
3268 // X | Y != 0 if X != 0 or Y != 0.
3269 return isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth) ||
3270 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3271 case Instruction::SExt:
3272 case Instruction::ZExt:
3273 // ext X != 0 if X != 0.
3274 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3275
3276 case Instruction::Shl: {
3277 // shl nsw/nuw can't remove any non-zero bits.
3278 auto *BO = cast<OverflowingBinaryOperator>(I);
3279 if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO))
3280 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3281
3282 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
3283 // if the lowest bit is shifted off the end.
3284 KnownBits Known(BitWidth);
3285 computeKnownBits(I->getOperand(0), DemandedElts, Known, Q, Depth);
3286 if (Known.One[0])
3287 return true;
3288
3289 return isNonZeroShift(I, DemandedElts, Q, Known, Depth);
3290 }
3291 case Instruction::LShr:
3292 case Instruction::AShr: {
3293 // shr exact can only shift out zero bits.
3294 auto *BO = cast<PossiblyExactOperator>(I);
3295 if (BO->isExact())
3296 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3297
3298 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
3299 // defined if the sign bit is shifted off the end.
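 // (For example, lshr i8 0x80, 5 gives 0x04: an in-range shift amount cannot
 // shift the set sign bit out, and ashr additionally keeps the sign bit set.)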
3300 KnownBits Known =
3301 computeKnownBits(I->getOperand(0), DemandedElts, Q, Depth);
3302 if (Known.isNegative())
3303 return true;
3304
3305 return isNonZeroShift(I, DemandedElts, Q, Known, Depth);
3306 }
3307 case Instruction::UDiv:
3308 case Instruction::SDiv: {
3309 // X / Y
3310 // div exact can only produce a zero if the dividend is zero.
3311 if (cast<PossiblyExactOperator>(I)->isExact())
3312 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
3313
3314 KnownBits XKnown =
3315 computeKnownBits(I->getOperand(0), DemandedElts, Q, Depth);
3316 // If X is fully unknown we won't be able to figure anything out so don't
3317 // bother computing known bits for Y.
3318 if (XKnown.isUnknown())
3319 return false;
3320
3321 KnownBits YKnown =
3322 computeKnownBits(I->getOperand(1), DemandedElts, Q, Depth);
3323 if (I->getOpcode() == Instruction::SDiv) {
3324 // For signed division need to compare abs value of the operands.
3325 XKnown = XKnown.abs(/*IntMinIsPoison*/ false);
3326 YKnown = YKnown.abs(/*IntMinIsPoison*/ false);
3327 }
3328 // If X u>= Y then div is non zero (0/0 is UB).
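 // (For example, X == 7 and Y == 3: 7 u>= 3, so the quotient is at least 1.
 // For sdiv the comparison is done on |X| and |Y|, computed just above.)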
3329 std::optional<bool> XUgeY = KnownBits::uge(XKnown, YKnown);
3330 // If X is totally unknown or X u< Y, we won't be able to prove non-zero
3331 // with compute known bits so just return early.
3332 return XUgeY && *XUgeY;
3333 }
3334 case Instruction::Add: {
3335 // X + Y.
3336
3337 // If the add has the nuw flag, then the result is non-zero if either X or Y
3338 // is non-zero.
3339 auto *BO = cast<OverflowingBinaryOperator>(I);
3340 return isNonZeroAdd(DemandedElts, Q, BitWidth, I->getOperand(0),
3341 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO),
3342 Q.IIQ.hasNoUnsignedWrap(BO), Depth);
3343 }
3344 case Instruction::Mul: {
3345 auto *BO = cast<OverflowingBinaryOperator>(I);
3346 return isNonZeroMul(DemandedElts, Q, BitWidth, I->getOperand(0),
3347 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO),
3348 Q.IIQ.hasNoUnsignedWrap(BO), Depth);
3349 }
3350 case Instruction::Select: {
3351 // (C ? X : Y) != 0 if X != 0 and Y != 0.
3352
3353 // First check if the arm is non-zero using `isKnownNonZero`. If that fails,
3354 // then see if the select condition implies the arm is non-zero. For example
3355 // (X != 0 ? X : Y), we know the true arm is non-zero as the `X` "return" is
3356 // dominated by `X != 0`.
3357 auto SelectArmIsNonZero = [&](bool IsTrueArm) {
3358 Value *Op;
3359 Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2);
3360 // Op is trivially non-zero.
3361 if (isKnownNonZero(Op, DemandedElts, Q, Depth))
3362 return true;
3363
3364 // The condition of the select dominates the true/false arm. Check if the
3365 // condition implies that a given arm is non-zero.
3366 Value *X;
3367 CmpPredicate Pred;
3368 if (!match(I->getOperand(0), m_c_ICmp(Pred, m_Specific(Op), m_Value(X))))
3369 return false;
3370
3371 if (!IsTrueArm)
3372 Pred = ICmpInst::getInversePredicate(Pred);
3373
3374 return cmpExcludesZero(Pred, X);
3375 };
3376
3377 if (SelectArmIsNonZero(/* IsTrueArm */ true) &&
3378 SelectArmIsNonZero(/* IsTrueArm */ false))
3379 return true;
3380 break;
3381 }
3382 case Instruction::PHI: {
3383 auto *PN = cast<PHINode>(I);
3384 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
3385 return true;
3386
3387 // Check if all incoming values are non-zero using recursion.
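 // (NewDepth below is pinned near MaxAnalysisRecursionDepth so that each
 // incoming value gets at most one extra level of recursion, bounding the
 // cost of walking every phi operand.)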
3388 SimplifyQuery RecQ = Q.getWithoutCondContext();
3389 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
3390 return llvm::all_of(PN->operands(), [&](const Use &U) {
3391 if (U.get() == PN)
3392 return true;
3393 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
3394 // Check if the branch on the phi excludes zero.
3395 CmpPredicate Pred;
3396 Value *X;
3397 BasicBlock *TrueSucc, *FalseSucc;
3398 if (match(RecQ.CxtI,
3399 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
3400 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
3401 // Check for cases of duplicate successors.
3402 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
3403 // If we're using the false successor, invert the predicate.
3404 if (FalseSucc == PN->getParent())
3405 Pred = CmpInst::getInversePredicate(Pred);
3406 if (cmpExcludesZero(Pred, X))
3407 return true;
3408 }
3409 }
3410 // Finally recurse on the edge and check it directly.
3411 return isKnownNonZero(U.get(), DemandedElts, RecQ, NewDepth);
3412 });
3413 }
3414 case Instruction::InsertElement: {
3415 if (isa<ScalableVectorType>(I->getType()))
3416 break;
3417
3418 const Value *Vec = I->getOperand(0);
3419 const Value *Elt = I->getOperand(1);
3420 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
3421
3422 unsigned NumElts = DemandedElts.getBitWidth();
3423 APInt DemandedVecElts = DemandedElts;
3424 bool SkipElt = false;
3425 // If we know the index we are inserting to, clear it from the Vec check.
3426 if (CIdx && CIdx->getValue().ult(NumElts)) {
3427 DemandedVecElts.clearBit(CIdx->getZExtValue());
3428 SkipElt = !DemandedElts[CIdx->getZExtValue()];
3429 }
3430
3431 // Result is non-zero if Elt is non-zero and the rest of the demanded elts
3432 // in Vec are non-zero.
3433 return (SkipElt || isKnownNonZero(Elt, Q, Depth)) &&
3434 (DemandedVecElts.isZero() ||
3435 isKnownNonZero(Vec, DemandedVecElts, Q, Depth));
3436 }
3437 case Instruction::ExtractElement:
3438 if (const auto *EEI = dyn_cast<ExtractElementInst>(I)) {
3439 const Value *Vec = EEI->getVectorOperand();
3440 const Value *Idx = EEI->getIndexOperand();
3441 auto *CIdx = dyn_cast<ConstantInt>(Idx);
3442 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
3443 unsigned NumElts = VecTy->getNumElements();
3444 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
3445 if (CIdx && CIdx->getValue().ult(NumElts))
3446 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
3447 return isKnownNonZero(Vec, DemandedVecElts, Q, Depth);
3448 }
3449 }
3450 break;
3451 case Instruction::ShuffleVector: {
3452 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
3453 if (!Shuf)
3454 break;
3455 APInt DemandedLHS, DemandedRHS;
3456 // For undef elements, we don't know anything about the common state of
3457 // the shuffle result.
3458 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3459 break;
3460 // If demanded elements for both vecs are non-zero, the shuffle is non-zero.
3461 return (DemandedRHS.isZero() ||
3462 isKnownNonZero(Shuf->getOperand(1), DemandedRHS, Q, Depth)) &&
3463 (DemandedLHS.isZero() ||
3464 isKnownNonZero(Shuf->getOperand(0), DemandedLHS, Q, Depth));
3465 }
3466 case Instruction::Freeze:
3467 return isKnownNonZero(I->getOperand(0), Q, Depth) &&
3468 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
3469 Depth);
3470 case Instruction::Load: {
3471 auto *LI = cast<LoadInst>(I);
3472 // A Load tagged with nonnull or dereferenceable with null pointer undefined
3473 // is never null.
3474 if (auto *PtrT = dyn_cast<PointerType>(I->getType())) {
3475 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull) ||
3476 (Q.IIQ.getMetadata(LI, LLVMContext::MD_dereferenceable) &&
3477 !NullPointerIsDefined(LI->getFunction(), PtrT->getAddressSpace())))
3478 return true;
3479 } else if (MDNode *Ranges = Q.IIQ.getMetadata(LI, LLVMContext::MD_range)) {
3480 return rangeMetadataExcludesValue(Ranges, APInt::getZero(BitWidth));
3481 }
3482
3483 // No need to fall through to computeKnownBits as range metadata is already
3484 // handled in isKnownNonZero.
3485 return false;
3486 }
3487 case Instruction::ExtractValue: {
3488 const WithOverflowInst *WO;
3489 if (match(I, m_ExtractValue<0>(m_WithOverflowInst(WO))))
3490 switch (WO->getBinaryOp()) {
3491 default:
3492 break;
3493 case Instruction::Add:
3494 return isNonZeroAdd(DemandedElts, Q, BitWidth, WO->getArgOperand(0),
3495 WO->getArgOperand(1),
3496 /*NSW=*/false,
3497 /*NUW=*/false, Depth);
3498 case Instruction::Sub:
3499 return isNonZeroSub(DemandedElts, Q, BitWidth, WO->getArgOperand(0),
3500 WO->getArgOperand(1), Depth);
3501 case Instruction::Mul:
3502 return isNonZeroMul(DemandedElts, Q, BitWidth, WO->getArgOperand(0),
3503 WO->getArgOperand(1),
3504 /*NSW=*/false, /*NUW=*/false, Depth);
3505 break;
3506 }
3507 }
3508 break;
3509 }
3510 case Instruction::Call:
3511 case Instruction::Invoke: {
3512 const auto *Call = cast<CallBase>(I);
3513 if (I->getType()->isPointerTy()) {
3514 if (Call->isReturnNonNull())
3515 return true;
3516 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
3517 return isKnownNonZero(RP, Q, Depth);
3518 } else {
3519 if (MDNode *Ranges = Q.IIQ.getMetadata(Call, LLVMContext::MD_range))
3521 if (std::optional<ConstantRange> Range = Call->getRange()) {
3522 const APInt ZeroValue(Range->getBitWidth(), 0);
3523 if (!Range->contains(ZeroValue))
3524 return true;
3525 }
3526 if (const Value *RV = Call->getReturnedArgOperand())
3527 if (RV->getType() == I->getType() && isKnownNonZero(RV, Q, Depth))
3528 return true;
3529 }
3530
3531 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
3532 switch (II->getIntrinsicID()) {
3533 case Intrinsic::sshl_sat:
3534 case Intrinsic::ushl_sat:
3535 case Intrinsic::abs:
3536 case Intrinsic::bitreverse:
3537 case Intrinsic::bswap:
3538 case Intrinsic::ctpop:
3539 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
3540 // NB: We don't handle usub_sat here because in any case where we can prove
3541 // it is non-zero, we will fold it to `sub nuw` in InstCombine.
3542 case Intrinsic::ssub_sat:
3543 return isNonZeroSub(DemandedElts, Q, BitWidth, II->getArgOperand(0),
3544 II->getArgOperand(1), Depth);
3545 case Intrinsic::sadd_sat:
3546 return isNonZeroAdd(DemandedElts, Q, BitWidth, II->getArgOperand(0),
3547 II->getArgOperand(1),
3548 /*NSW=*/true, /* NUW=*/false, Depth);
3549 // Vec reverse preserves zero/non-zero status from input vec.
3550 case Intrinsic::vector_reverse:
3551 return isKnownNonZero(II->getArgOperand(0), DemandedElts.reverseBits(),
3552 Q, Depth);
3553 // umax/umin/smax/smin/or of all non-zero elements is always non-zero.
3554 case Intrinsic::vector_reduce_or:
3555 case Intrinsic::vector_reduce_umax:
3556 case Intrinsic::vector_reduce_umin:
3557 case Intrinsic::vector_reduce_smax:
3558 case Intrinsic::vector_reduce_smin:
3559 return isKnownNonZero(II->getArgOperand(0), Q, Depth);
3560 case Intrinsic::umax:
3561 case Intrinsic::uadd_sat:
3562 // umax(X, (X == 0)) is non zero
3563 // X +usat (X == 0) is non zero
3564 if (matchOpWithOpEqZero(II->getArgOperand(0), II->getArgOperand(1)))
3565 return true;
3566
3567 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth) ||
3568 isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
3569 case Intrinsic::smax: {
3570 // If either arg is strictly positive the result is non-zero. Otherwise
3571 // the result is non-zero if both ops are non-zero.
3572 auto IsNonZero = [&](Value *Op, std::optional<bool> &OpNonZero,
3573 const KnownBits &OpKnown) {
3574 if (!OpNonZero.has_value())
3575 OpNonZero = OpKnown.isNonZero() ||
3576 isKnownNonZero(Op, DemandedElts, Q, Depth);
3577 return *OpNonZero;
3578 };
3579 // Avoid re-computing isKnownNonZero.
3580 std::optional<bool> Op0NonZero, Op1NonZero;
3581 KnownBits Op1Known =
3582 computeKnownBits(II->getArgOperand(1), DemandedElts, Q, Depth);
3583 if (Op1Known.isNonNegative() &&
3584 IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known))
3585 return true;
3586 KnownBits Op0Known =
3587 computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth);
3588 if (Op0Known.isNonNegative() &&
3589 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known))
3590 return true;
3591 return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) &&
3592 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known);
3593 }
3594 case Intrinsic::smin: {
3595 // If either arg is negative the result is non-zero. Otherwise
3596 // the result is non-zero if both ops are non-zero.
3597 KnownBits Op1Known =
3598 computeKnownBits(II->getArgOperand(1), DemandedElts, Q, Depth);
3599 if (Op1Known.isNegative())
3600 return true;
3601 KnownBits Op0Known =
3602 computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth);
3603 if (Op0Known.isNegative())
3604 return true;
3605
3606 if (Op1Known.isNonZero() && Op0Known.isNonZero())
3607 return true;
3608 }
3609 [[fallthrough]];
3610 case Intrinsic::umin:
3611 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth) &&
3612 isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth);
3613 case Intrinsic::cttz:
3614 return computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth)
3615 .Zero[0];
3616 case Intrinsic::ctlz:
3617 return computeKnownBits(II->getArgOperand(0), DemandedElts, Q, Depth)
3618 .isNonNegative();
3619 case Intrinsic::fshr:
3620 case Intrinsic::fshl:
3621 // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0.
3622 if (II->getArgOperand(0) == II->getArgOperand(1))
3623 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
3624 break;
3625 case Intrinsic::vscale:
3626 return true;
3627 case Intrinsic::experimental_get_vector_length:
3628 return isKnownNonZero(I->getOperand(0), Q, Depth);
3629 default:
3630 break;
3631 }
3632 break;
3633 }
3634
3635 return false;
3636 }
3637 }
3638
3639 KnownBits Known(BitWidth);
3640 computeKnownBits(I, DemandedElts, Known, Q, Depth);
3641 return Known.One != 0;
3642}
3643
3644/// Return true if the given value is known to be non-zero when defined. For
3645/// vectors, return true if every demanded element is known to be non-zero when
3646/// defined. For pointers, if the context instruction and dominator tree are
3647/// specified, perform context-sensitive analysis and return true if the
3648/// pointer couldn't possibly be null at the specified instruction.
3649/// Supports values with integer or pointer type and vectors of integers.
3650bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
3651 const SimplifyQuery &Q, unsigned Depth) {
3652 Type *Ty = V->getType();
3653
3654#ifndef NDEBUG
3655 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3656
3657 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3658 assert(
3659 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3660 "DemandedElt width should equal the fixed vector number of elements");
3661 } else {
3662 assert(DemandedElts == APInt(1, 1) &&
3663 "DemandedElt width should be 1 for scalars");
3664 }
3665#endif
3666
3667 if (auto *C = dyn_cast<Constant>(V)) {
3668 if (C->isNullValue())
3669 return false;
3670 if (isa<ConstantInt>(C))
3671 // Must be non-zero due to null test above.
3672 return true;
3673
3674 // For constant vectors, check that all elements are poison or known
3675 // non-zero to determine that the whole vector is known non-zero.
3676 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
3677 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
3678 if (!DemandedElts[i])
3679 continue;
3680 Constant *Elt = C->getAggregateElement(i);
3681 if (!Elt || Elt->isNullValue())
3682 return false;
3683 if (!isa<PoisonValue>(Elt) && !isa<ConstantInt>(Elt))
3684 return false;
3685 }
3686 return true;
3687 }
3688
3689 // Constant ptrauth can be null, iff the base pointer can be.
3690 if (auto *CPA = dyn_cast<ConstantPtrAuth>(V))
3691 return isKnownNonZero(CPA->getPointer(), DemandedElts, Q, Depth);
3692
3693 // A global variable in address space 0 is non null unless extern weak
3694 // or an absolute symbol reference. Other address spaces may have null as a
3695 // valid address for a global, so we can't assume anything.
3696 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
3697 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3698 GV->getType()->getAddressSpace() == 0)
3699 return true;
3700 }
3701
3702 // For constant expressions, fall through to the Operator code below.
3703 if (!isa<ConstantExpr>(V))
3704 return false;
3705 }
3706
3707 if (const auto *A = dyn_cast<Argument>(V))
3708 if (std::optional<ConstantRange> Range = A->getRange()) {
3709 const APInt ZeroValue(Range->getBitWidth(), 0);
3710 if (!Range->contains(ZeroValue))
3711 return true;
3712 }
3713
3714 if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q))
3715 return true;
3716
3717 // Some of the tests below are recursive, so bail out if we hit the limit.
3718 if (Depth++ >= MaxAnalysisRecursionDepth)
3719 return false;
3720
3721 // Check for pointer simplifications.
3722
3723 if (PointerType *PtrTy = dyn_cast<PointerType>(Ty)) {
3724 // A byval or inalloca argument may not be null in a non-default address
3725 // space. A nonnull argument is assumed never 0.
3726 if (const Argument *A = dyn_cast<Argument>(V)) {
3727 if (((A->hasPassPointeeByValueCopyAttr() &&
3728 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
3729 A->hasNonNullAttr()))
3730 return true;
3731 }
3732 }
3733
3734 if (const auto *I = dyn_cast<Operator>(V))
3735 if (isKnownNonZeroFromOperator(I, DemandedElts, Q, Depth))
3736 return true;
3737
3738 if (!isa<Constant>(V) &&
3739 isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
3740 return true;
3741
3742 if (const Value *Stripped = stripNullTest(V))
3743 return isKnownNonZero(Stripped, DemandedElts, Q, Depth);
3744
3745 return false;
3746}
3747
3748bool isKnownNonZero(const Value *V, const SimplifyQuery &Q,
3749 unsigned Depth) {
3750 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
3751 APInt DemandedElts =
3752 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
3753 return ::isKnownNonZero(V, DemandedElts, Q, Depth);
3754}
3755
3756/// If the pair of operators are the same invertible function, return the
3757/// operands of the function corresponding to each input. Otherwise,
3758/// return std::nullopt. An invertible function is one that is 1-to-1 and maps
3759/// every input value to exactly one output value. This is equivalent to
3760/// saying that Op1 and Op2 are equal exactly when the specified pair of
3761/// operands are equal, (except that Op1 and Op2 may be poison more often.)
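/// For example, (add %x, %c) and (add %y, %c) are equal exactly when %x and %y
/// are equal, so Add is invertible; a plain (mul %x, 2) is not, since the top
/// bit of %x is lost.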
3762static std::optional<std::pair<Value*, Value*>>
3763getInvertibleOperands(const Operator *Op1,
3764 const Operator *Op2) {
3765 if (Op1->getOpcode() != Op2->getOpcode())
3766 return std::nullopt;
3767
3768 auto getOperands = [&](unsigned OpNum) -> auto {
3769 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
3770 };
3771
3772 switch (Op1->getOpcode()) {
3773 default:
3774 break;
3775 case Instruction::Or:
3776 if (!cast<PossiblyDisjointInst>(Op1)->isDisjoint() ||
3777 !cast<PossiblyDisjointInst>(Op2)->isDisjoint())
3778 break;
3779 [[fallthrough]];
3780 case Instruction::Xor:
3781 case Instruction::Add: {
3782 Value *Other;
3783 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(0)), m_Value(Other))))
3784 return std::make_pair(Op1->getOperand(1), Other);
3785 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(1)), m_Value(Other))))
3786 return std::make_pair(Op1->getOperand(0), Other);
3787 break;
3788 }
3789 case Instruction::Sub:
3790 if (Op1->getOperand(0) == Op2->getOperand(0))
3791 return getOperands(1);
3792 if (Op1->getOperand(1) == Op2->getOperand(1))
3793 return getOperands(0);
3794 break;
3795 case Instruction::Mul: {
3796 // invertible if A * B == (A * B) mod 2^N where A and B are integers
3797 // and N is the bitwidth. The nsw case is non-obvious, but proven by
3798 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
3799 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3800 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3801 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3802 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3803 break;
3804
3805 // Assume operand order has been canonicalized
3806 if (Op1->getOperand(1) == Op2->getOperand(1) &&
3807 isa<ConstantInt>(Op1->getOperand(1)) &&
3808 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
3809 return getOperands(0);
3810 break;
3811 }
3812 case Instruction::Shl: {
3813 // Same as multiplies, with the difference that we don't need to check
3814 // for a non-zero multiply. Shifts always multiply by non-zero.
3815 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3816 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3817 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3818 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3819 break;
3820
3821 if (Op1->getOperand(1) == Op2->getOperand(1))
3822 return getOperands(0);
3823 break;
3824 }
3825 case Instruction::AShr:
3826 case Instruction::LShr: {
3827 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3828 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3829 if (!PEO1->isExact() || !PEO2->isExact())
3830 break;
3831
3832 if (Op1->getOperand(1) == Op2->getOperand(1))
3833 return getOperands(0);
3834 break;
3835 }
3836 case Instruction::SExt:
3837 case Instruction::ZExt:
3838 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
3839 return getOperands(0);
3840 break;
3841 case Instruction::PHI: {
3842 const PHINode *PN1 = cast<PHINode>(Op1);
3843 const PHINode *PN2 = cast<PHINode>(Op2);
3844
3845 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
3846 // are a single invertible function of the start values? Note that repeated
3847 // application of an invertible function is also invertible
3848 BinaryOperator *BO1 = nullptr;
3849 Value *Start1 = nullptr, *Step1 = nullptr;
3850 BinaryOperator *BO2 = nullptr;
3851 Value *Start2 = nullptr, *Step2 = nullptr;
3852 if (PN1->getParent() != PN2->getParent() ||
3853 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
3854 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
3855 break;
3856
3857 auto Values = getInvertibleOperands(cast<Operator>(BO1),
3858 cast<Operator>(BO2));
3859 if (!Values)
3860 break;
3861
3862 // We have to be careful of mutually defined recurrences here. Ex:
3863 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
3864 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
3865 // The invertibility of these is complicated, and not worth reasoning
3866 // about (yet?).
3867 if (Values->first != PN1 || Values->second != PN2)
3868 break;
3869
3870 return std::make_pair(Start1, Start2);
3871 }
3872 }
3873 return std::nullopt;
3874}
3875
3876/// Return true if V1 == (binop V2, X), where X is known non-zero.
3877/// Only handle a small subset of binops where (binop V2, X) with non-zero X
3878/// implies V2 != V1.
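/// For example, if V1 == (add V2, X) and X is known non-zero, then V1 != V2,
/// since adding a non-zero value always changes the value modulo 2^N.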
3879static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2,
3880 const APInt &DemandedElts,
3881 const SimplifyQuery &Q, unsigned Depth) {
3882 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
3883 if (!BO)
3884 return false;
3885 switch (BO->getOpcode()) {
3886 default:
3887 break;
3888 case Instruction::Or:
3889 if (!cast<PossiblyDisjointInst>(V1)->isDisjoint())
3890 break;
3891 [[fallthrough]];
3892 case Instruction::Xor:
3893 case Instruction::Add:
3894 Value *Op = nullptr;
3895 if (V2 == BO->getOperand(0))
3896 Op = BO->getOperand(1);
3897 else if (V2 == BO->getOperand(1))
3898 Op = BO->getOperand(0);
3899 else
3900 return false;
3901 return isKnownNonZero(Op, DemandedElts, Q, Depth + 1);
3902 }
3903 return false;
3904}
3905
3906/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3907/// the multiplication is nuw or nsw.
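/// For example, with nuw the product V1 * C equals the exact mathematical
/// product, so V1 * C == V1 would force C == 1 once V1 is known non-zero.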
3908static bool isNonEqualMul(const Value *V1, const Value *V2,
3909 const APInt &DemandedElts, const SimplifyQuery &Q,
3910 unsigned Depth) {
3911 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3912 const APInt *C;
3913 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
3914 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3915 !C->isZero() && !C->isOne() &&
3916 isKnownNonZero(V1, DemandedElts, Q, Depth + 1);
3917 }
3918 return false;
3919}
3920
3921/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3922/// the shift is nuw or nsw.
3923static bool isNonEqualShl(const Value *V1, const Value *V2,
3924 const APInt &DemandedElts, const SimplifyQuery &Q,
3925 unsigned Depth) {
3926 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3927 const APInt *C;
3928 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
3929 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3930 !C->isZero() && isKnownNonZero(V1, DemandedElts, Q, Depth + 1);
3931 }
3932 return false;
3933}
3934
3935static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
3936 const APInt &DemandedElts, const SimplifyQuery &Q,
3937 unsigned Depth) {
3938 // Check two PHIs are in same block.
3939 if (PN1->getParent() != PN2->getParent())
3940 return false;
3941
3942 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
3943 bool UsedFullRecursion = false;
3944 for (const BasicBlock *IncomBB : PN1->blocks()) {
3945 if (!VisitedBBs.insert(IncomBB).second)
3946 continue; // Don't reprocess blocks that we have dealt with already.
3947 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
3948 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
3949 const APInt *C1, *C2;
3950 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
3951 continue;
3952
3953 // Only one pair of phi operands is allowed for full recursion.
3954 if (UsedFullRecursion)
3955 return false;
3956
3957 SimplifyQuery RecQ = Q.getWithoutCondContext();
3958 RecQ.CxtI = IncomBB->getTerminator();
3959 if (!isKnownNonEqual(IV1, IV2, DemandedElts, RecQ, Depth + 1))
3960 return false;
3961 UsedFullRecursion = true;
3962 }
3963 return true;
3964}
3965
3966static bool isNonEqualSelect(const Value *V1, const Value *V2,
3967 const APInt &DemandedElts, const SimplifyQuery &Q,
3968 unsigned Depth) {
3969 const SelectInst *SI1 = dyn_cast<SelectInst>(V1);
3970 if (!SI1)
3971 return false;
3972
3973 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) {
3974 const Value *Cond1 = SI1->getCondition();
3975 const Value *Cond2 = SI2->getCondition();
3976 if (Cond1 == Cond2)
3977 return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(),
3978 DemandedElts, Q, Depth + 1) &&
3979 isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(),
3980 DemandedElts, Q, Depth + 1);
3981 }
3982 return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Q, Depth + 1) &&
3983 isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Q, Depth + 1);
3984}
3985
3986// Check to see if A is both a GEP and the incoming value for a PHI in a loop,
3987// and B is either a ptr or another GEP. If the PHI has 2 incoming values, one
3988// of them being the recursive GEP A and the other a ptr with the same base and
3989// the same/higher offset than B, then A and B cannot be equal, because the
3990// recursive GEP only moves the pointer further when its offset is non-zero.
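// Illustrative IR (not from this file) for the pattern handled here:
//   loop:
//     %p = phi ptr [ %start, %entry ], [ %a, %loop ]
//     %a = getelementptr inbounds i8, ptr %p, i64 4   ; recursive GEP A
// If %start shares B's base at the same or a higher offset, A can never
// equal B, because each iteration moves the pointer further forward.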
3991static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B,
3992 const SimplifyQuery &Q) {
3993 if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
3994 return false;
3995
3996 auto *GEPA = dyn_cast<GEPOperator>(A);
3997 if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin()))
3998 return false;
3999
4000 // Handle 2 incoming PHI values with one being a recursive GEP.
4001 auto *PN = dyn_cast<PHINode>(GEPA->getPointerOperand());
4002 if (!PN || PN->getNumIncomingValues() != 2)
4003 return false;
4004
4005 // Search for the recursive GEP as an incoming operand, and record that as
4006 // Step.
4007 Value *Start = nullptr;
4008 Value *Step = const_cast<Value *>(A);
4009 if (PN->getIncomingValue(0) == Step)
4010 Start = PN->getIncomingValue(1);
4011 else if (PN->getIncomingValue(1) == Step)
4012 Start = PN->getIncomingValue(0);
4013 else
4014 return false;
4015
4016 // Other incoming node base should match the B base.
4017 // StartOffset >= OffsetB && StepOffset > 0?
4018 // StartOffset <= OffsetB && StepOffset < 0?
4019 // Is non-equal if above are true.
4020 // We use stripAndAccumulateInBoundsConstantOffsets to restrict the
4021 // optimisation to inbounds GEPs only.
4022 unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(Start->getType());
4023 APInt StartOffset(IndexWidth, 0);
4024 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StartOffset);
4025 APInt StepOffset(IndexWidth, 0);
4026 Step = Step->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StepOffset);
4027
4028 // Check if Base Pointer of Step matches the PHI.
4029 if (Step != PN)
4030 return false;
4031 APInt OffsetB(IndexWidth, 0);
4032 B = B->stripAndAccumulateInBoundsConstantOffsets(Q.DL, OffsetB);
4033 return Start == B &&
4034 ((StartOffset.sge(OffsetB) && StepOffset.isStrictlyPositive()) ||
4035 (StartOffset.sle(OffsetB) && StepOffset.isNegative()));
4036}
4037
4038static bool isKnownNonEqualFromContext(const Value *V1, const Value *V2,
4039 const SimplifyQuery &Q, unsigned Depth) {
4040 if (!Q.CxtI)
4041 return false;
4042
4043 // Try to infer NonEqual based on information from dominating conditions.
4044 if (Q.DC && Q.DT) {
4045 auto IsKnownNonEqualFromDominatingCondition = [&](const Value *V) {
4046 for (BranchInst *BI : Q.DC->conditionsFor(V)) {
4047 Value *Cond = BI->getCondition();
4048 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
4049 if (Q.DT->dominates(Edge0, Q.CxtI->getParent()) &&
4050 isImpliedCondition(Cond, ICmpInst::ICMP_NE, V1, V2, Q.DL,
4051 /*LHSIsTrue=*/true, Depth)
4052 .value_or(false))
4053 return true;
4054
4055 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
4056 if (Q.DT->dominates(Edge1, Q.CxtI->getParent()) &&
4057 isImpliedCondition(Cond, ICmpInst::ICMP_NE, V1, V2, Q.DL,
4058 /*LHSIsTrue=*/false, Depth)
4059 .value_or(false))
4060 return true;
4061 }
4062
4063 return false;
4064 };
4065
4066 if (IsKnownNonEqualFromDominatingCondition(V1) ||
4067 IsKnownNonEqualFromDominatingCondition(V2))
4068 return true;
4069 }
4070
4071 if (!Q.AC)
4072 return false;
4073
4074 // Try to infer NonEqual based on information from assumptions.
4075 for (auto &AssumeVH : Q.AC->assumptionsFor(V1)) {
4076 if (!AssumeVH)
4077 continue;
4078 CallInst *I = cast<CallInst>(AssumeVH);
4079
4080 assert(I->getFunction() == Q.CxtI->getFunction() &&
4081 "Got assumption for the wrong function!");
4082 assert(I->getIntrinsicID() == Intrinsic::assume &&
4083 "must be an assume intrinsic");
4084
4085 if (isImpliedCondition(I->getArgOperand(0), ICmpInst::ICMP_NE, V1, V2, Q.DL,
4086 /*LHSIsTrue=*/true, Depth)
4087 .value_or(false) &&
4088 isValidAssumeForContext(I, Q.CxtI, Q.DT))
4089 return true;
4090 }
4091
4092 return false;
4093}
4094
4095/// Return true if it is known that V1 != V2.
4096static bool isKnownNonEqual(const Value *V1, const Value *V2,
4097 const APInt &DemandedElts, const SimplifyQuery &Q,
4098 unsigned Depth) {
4099 if (V1 == V2)
4100 return false;
4101 if (V1->getType() != V2->getType())
4102 // We can't look through casts yet.
4103 return false;
4104
4105 if (Depth >= MaxAnalysisRecursionDepth)
4106 return false;
4107
4108 // See if we can recurse through (exactly one of) our operands. This
4109 // requires our operation be 1-to-1 and map every input value to exactly
4110 // one output value. Such an operation is invertible.
4111 auto *O1 = dyn_cast<Operator>(V1);
4112 auto *O2 = dyn_cast<Operator>(V2);
4113 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
4114 if (auto Values = getInvertibleOperands(O1, O2))
4115 return isKnownNonEqual(Values->first, Values->second, DemandedElts, Q,
4116 Depth + 1);
4117
4118 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
4119 const PHINode *PN2 = cast<PHINode>(V2);
4120 // FIXME: This is missing a generalization to handle the case where one is
4121 // a PHI and another one isn't.
4122 if (isNonEqualPHIs(PN1, PN2, DemandedElts, Q, Depth))
4123 return true;
4124 };
4125 }
4126
4127 if (isModifyingBinopOfNonZero(V1, V2, DemandedElts, Q, Depth) ||
4128 isModifyingBinopOfNonZero(V2, V1, DemandedElts, Q, Depth))
4129 return true;
4130
4131 if (isNonEqualMul(V1, V2, DemandedElts, Q, Depth) ||
4132 isNonEqualMul(V2, V1, DemandedElts, Q, Depth))
4133 return true;
4134
4135 if (isNonEqualShl(V1, V2, DemandedElts, Q, Depth) ||
4136 isNonEqualShl(V2, V1, DemandedElts, Q, Depth))
4137 return true;
4138
4139 if (V1->getType()->isIntOrIntVectorTy()) {
4140 // Are any known bits in V1 contradictory to known bits in V2? If V1
4141 // has a known zero where V2 has a known one, they must not be equal.
4142 KnownBits Known1 = computeKnownBits(V1, DemandedElts, Q, Depth);
4143 if (!Known1.isUnknown()) {
4144 KnownBits Known2 = computeKnownBits(V2, DemandedElts, Q, Depth);
4145 if (Known1.Zero.intersects(Known2.One) ||
4146 Known2.Zero.intersects(Known1.One))
4147 return true;
4148 }
4149 }
4150
4151 if (isNonEqualSelect(V1, V2, DemandedElts, Q, Depth) ||
4152 isNonEqualSelect(V2, V1, DemandedElts, Q, Depth))
4153 return true;
4154
4155 if (isNonEqualPointersWithRecursiveGEP(V1, V2, Q) ||
4156 isNonEqualPointersWithRecursiveGEP(V2, V1, Q))
4157 return true;
4158
4159 Value *A, *B;
4160 // PtrToInts are NonEqual if their Ptrs are NonEqual.
4161 // Check PtrToInt type matches the pointer size.
4162 if (match(V1, m_PtrToIntSameSize(Q.DL, m_Value(A))) &&
4163 match(V2, m_PtrToIntSameSize(Q.DL, m_Value(B))))
4164 return isKnownNonEqual(A, B, DemandedElts, Q, Depth + 1);
4165
4166 if (isKnownNonEqualFromContext(V1, V2, Q, Depth))
4167 return true;
4168
4169 return false;
4170}
4171
4172/// For vector constants, loop over the elements and find the constant with the
4173/// minimum number of sign bits. Return 0 if the value is not a vector constant
4174/// or if any element was not analyzed; otherwise, return the count for the
4175/// element with the minimum number of sign bits.
4176static unsigned computeNumSignBitsVectorConstant(const Value *V,
4177 const APInt &DemandedElts,
4178 unsigned TyBits) {
4179 const auto *CV = dyn_cast<Constant>(V);
4180 if (!CV || !isa<FixedVectorType>(CV->getType()))
4181 return 0;
4182
4183 unsigned MinSignBits = TyBits;
4184 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
4185 for (unsigned i = 0; i != NumElts; ++i) {
4186 if (!DemandedElts[i])
4187 continue;
4188 // If we find a non-ConstantInt, bail out.
4189 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
4190 if (!Elt)
4191 return 0;
4192
4193 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
4194 }
4195
4196 return MinSignBits;
4197}
4198
4199static unsigned ComputeNumSignBitsImpl(const Value *V,
4200 const APInt &DemandedElts,
4201 const SimplifyQuery &Q, unsigned Depth);
4202
4203static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
4204 const SimplifyQuery &Q, unsigned Depth) {
4205 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Q, Depth);
4206 assert(Result > 0 && "At least one sign bit needs to be present!");
4207 return Result;
4208}
4209
4210/// Return the number of times the sign bit of the register is replicated into
4211/// the other bits. We know that at least 1 bit is always equal to the sign bit
4212/// (itself), but other cases can give us information. For example, immediately
4213/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
4214/// other, so we return 3. For vectors, return the number of sign bits for the
4215/// vector element with the minimum number of known sign bits of the demanded
4216/// elements in the vector specified by DemandedElts.
4217static unsigned ComputeNumSignBitsImpl(const Value *V,
4218 const APInt &DemandedElts,
4219 const SimplifyQuery &Q, unsigned Depth) {
4220 Type *Ty = V->getType();
4221#ifndef NDEBUG
4222 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
4223
4224 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
4225 assert(
4226 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
4227 "DemandedElt width should equal the fixed vector number of elements");
4228 } else {
4229 assert(DemandedElts == APInt(1, 1) &&
4230 "DemandedElt width should be 1 for scalars");
4231 }
4232#endif
4233
4234 // We return the minimum number of sign bits that are guaranteed to be present
4235 // in V, so for undef we have to conservatively return 1. We don't have the
4236 // same behavior for poison though -- that's a FIXME today.
4237
4238 Type *ScalarTy = Ty->getScalarType();
4239 unsigned TyBits = ScalarTy->isPointerTy() ?
4240 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
4241 Q.DL.getTypeSizeInBits(ScalarTy);
4242
4243 unsigned Tmp, Tmp2;
4244 unsigned FirstAnswer = 1;
4245
4246 // Note that ConstantInt is handled by the general computeKnownBits case
4247 // below.
4248
4249 if (Depth == MaxAnalysisRecursionDepth)
4250 return 1;
4251
4252 if (auto *U = dyn_cast<Operator>(V)) {
4253 switch (Operator::getOpcode(V)) {
4254 default: break;
4255 case Instruction::BitCast: {
4256 Value *Src = U->getOperand(0);
4257 Type *SrcTy = Src->getType();
4258
4259 // Skip if the source type is not an integer or integer vector type
4260 // This ensures we only process integer-like types
4261 if (!SrcTy->isIntOrIntVectorTy())
4262 break;
4263
4264 unsigned SrcBits = SrcTy->getScalarSizeInBits();
4265
4266 // Bitcast 'large element' scalar/vector to 'small element' vector.
4267 if ((SrcBits % TyBits) != 0)
4268 break;
4269
4270 // Only proceed if the destination type is a fixed-size vector
4271 if (isa<FixedVectorType>(Ty)) {
4272 // Fast case - sign splat can be simply split across the small elements.
4273 // This works for both vector and scalar sources
4274 Tmp = ComputeNumSignBits(Src, Q, Depth + 1);
4275 if (Tmp == SrcBits)
4276 return TyBits;
4277 }
4278 break;
4279 }
4280 case Instruction::SExt:
4281 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
4282 return ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1) +
4283 Tmp;
4284
4285 case Instruction::SDiv: {
4286 const APInt *Denominator;
4287 // sdiv X, C -> adds log(C) sign bits.
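 // (For example, if X has at least 10 sign bits and C == 16, the quotient
 // has at least 10 + log2(16) == 14 sign bits, capped at the bit width.)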
4288 if (match(U->getOperand(1), m_APInt(Denominator))) {
4289
4290 // Ignore non-positive denominator.
4291 if (!Denominator->isStrictlyPositive())
4292 break;
4293
4294 // Calculate the incoming numerator bits.
4295 unsigned NumBits =
4296 ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4297
4298 // Add floor(log(C)) bits to the numerator bits.
4299 return std::min(TyBits, NumBits + Denominator->logBase2());
4300 }
4301 break;
4302 }
4303
4304 case Instruction::SRem: {
4305 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4306
4307 const APInt *Denominator;
4308 // srem X, C -> we know that the result is within [-C+1,C) when C is a
4309 // positive constant. This let us put a lower bound on the number of sign
4310 // bits.
4311 if (match(U->getOperand(1), m_APInt(Denominator))) {
4312
4313 // Ignore non-positive denominator.
4314 if (Denominator->isStrictlyPositive()) {
4315 // Calculate the leading sign bit constraints by examining the
4316 // denominator. Given that the denominator is positive, there are two
4317 // cases:
4318 //
4319 // 1. The numerator is positive. The result range is [0,C) and
4320 // [0,C) u< (1 << ceilLogBase2(C)).
4321 //
4322 // 2. The numerator is negative. Then the result range is (-C,0] and
4323 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
4324 //
4325 // Thus a lower bound on the number of sign bits is `TyBits -
4326 // ceilLogBase2(C)`.
4327
4328 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
4329 Tmp = std::max(Tmp, ResBits);
4330 }
4331 }
4332 return Tmp;
4333 }
4334
4335 case Instruction::AShr: {
4336 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4337 // ashr X, C -> adds C sign bits. Vectors too.
4338 const APInt *ShAmt;
4339 if (match(U->getOperand(1), m_APInt(ShAmt))) {
4340 if (ShAmt->uge(TyBits))
4341 break; // Bad shift.
4342 unsigned ShAmtLimited = ShAmt->getZExtValue();
4343 Tmp += ShAmtLimited;
4344 if (Tmp > TyBits) Tmp = TyBits;
4345 }
4346 return Tmp;
4347 }
4348 case Instruction::Shl: {
4349 const APInt *ShAmt;
4350 Value *X = nullptr;
4351 if (match(U->getOperand(1), m_APInt(ShAmt))) {
4352 // shl destroys sign bits.
4353 if (ShAmt->uge(TyBits))
4354 break; // Bad shift.
4355 // We can look through a zext (more or less treating it as a sext) if
4356 // all extended bits are shifted out.
4357 if (match(U->getOperand(0), m_ZExt(m_Value(X))) &&
4358 ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) {
4359 Tmp = ComputeNumSignBits(X, DemandedElts, Q, Depth + 1);
4360 Tmp += TyBits - X->getType()->getScalarSizeInBits();
4361 } else
4362 Tmp =
4363 ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4364 if (ShAmt->uge(Tmp))
4365 break; // Shifted all sign bits out.
4366 Tmp2 = ShAmt->getZExtValue();
4367 return Tmp - Tmp2;
4368 }
4369 break;
4370 }
4371 case Instruction::And:
4372 case Instruction::Or:
4373 case Instruction::Xor: // NOT is handled here.
4374 // Logical binary ops preserve the number of sign bits at the worst.
4375 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4376 if (Tmp != 1) {
4377 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
4378 FirstAnswer = std::min(Tmp, Tmp2);
4379 // We computed what we know about the sign bits as our first
4380 // answer. Now proceed to the generic code that uses
4381 // computeKnownBits, and pick whichever answer is better.
4382 }
4383 break;
4384
4385 case Instruction::Select: {
4386 // If we have a clamp pattern, we know that the number of sign bits will
4387 // be the minimum of the clamp min/max range.
4388 const Value *X;
4389 const APInt *CLow, *CHigh;
4390 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
4391 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
4392
4393 Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
4394 if (Tmp == 1)
4395 break;
4396 Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Q, Depth + 1);
4397 return std::min(Tmp, Tmp2);
4398 }
4399
4400 case Instruction::Add:
4401 // Add can have at most one carry bit. Thus we know that the output
4402 // is, at worst, one more bit than the inputs.
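 // (For example, two i8 values with 3 sign bits each lie in [-32, 31]; their
 // sum lies in [-64, 62], which still has at least 2 sign bits.)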
4403 Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth + 1);
4404 if (Tmp == 1) break;
4405
4406 // Special case decrementing a value (ADD X, -1):
4407 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
4408 if (CRHS->isAllOnesValue()) {
4409 KnownBits Known(TyBits);
4410 computeKnownBits(U->getOperand(0), DemandedElts, Known, Q, Depth + 1);
4411
4412 // If the input is known to be 0 or 1, the output is 0/-1, which is
4413 // all sign bits set.
4414 if ((Known.Zero | 1).isAllOnes())
4415 return TyBits;
4416
4417 // If we are subtracting one from a positive number, there is no carry
4418 // out of the result.
4419 if (Known.isNonNegative())
4420 return Tmp;
4421 }
4422
4423 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
4424 if (Tmp2 == 1)
4425 break;
4426 return std::min(Tmp, Tmp2) - 1;
4427
4428 case Instruction::Sub:
4429 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
4430 if (Tmp2 == 1)
4431 break;
4432
4433 // Handle NEG.
4434 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
4435 if (CLHS->isNullValue()) {
4436 KnownBits Known(TyBits);
4437 computeKnownBits(U->getOperand(1), DemandedElts, Known, Q, Depth + 1);
4438 // If the input is known to be 0 or 1, the output is 0/-1, which is
4439 // all sign bits set.
4440 if ((Known.Zero | 1).isAllOnes())
4441 return TyBits;
4442
4443 // If the input is known to be positive (the sign bit is known clear),
4444 // the output of the NEG has the same number of sign bits as the
4445 // input.
4446 if (Known.isNonNegative())
4447 return Tmp2;
4448
4449 // Otherwise, we treat this like a SUB.
4450 }
4451
4452 // Sub can have at most one carry bit. Thus we know that the output
4453 // is, at worst, one more bit than the inputs.
4454 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4455 if (Tmp == 1)
4456 break;
4457 return std::min(Tmp, Tmp2) - 1;
4458
4459 case Instruction::Mul: {
4460 // The output of the Mul can be at most twice the valid bits in the
4461 // inputs.
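 // (For example, with 32-bit operands having 20 and 18 sign bits, the valid
 // bits are 13 + 15 == 28, so the product has at least 32 - 28 + 1 == 5
 // sign bits.)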
4462 unsigned SignBitsOp0 =
4463 ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4464 if (SignBitsOp0 == 1)
4465 break;
4466 unsigned SignBitsOp1 =
4467 ComputeNumSignBits(U->getOperand(1), DemandedElts, Q, Depth + 1);
4468 if (SignBitsOp1 == 1)
4469 break;
4470 unsigned OutValidBits =
4471 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
4472 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
4473 }
4474
4475 case Instruction::PHI: {
4476 const PHINode *PN = cast<PHINode>(U);
4477 unsigned NumIncomingValues = PN->getNumIncomingValues();
4478 // Don't analyze large in-degree PHIs.
4479 if (NumIncomingValues > 4) break;
4480 // Unreachable blocks may have zero-operand PHI nodes.
4481 if (NumIncomingValues == 0) break;
4482
4483 // Take the minimum of all incoming values. This can't infinitely loop
4484 // because of our depth threshold.
4485 SimplifyQuery RecQ = Q.getWithoutCondContext();
4486 Tmp = TyBits;
4487 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
4488 if (Tmp == 1) return Tmp;
4489 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
4490 Tmp = std::min(Tmp, ComputeNumSignBits(PN->getIncomingValue(i),
4491 DemandedElts, RecQ, Depth + 1));
4492 }
4493 return Tmp;
4494 }
4495
4496 case Instruction::Trunc: {
4497 // If the input contained enough sign bits that some remain after the
4498 // truncation, then we can make use of that. Otherwise we don't know
4499 // anything.
4500 Tmp = ComputeNumSignBits(U->getOperand(0), Q, Depth + 1);
4501 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
4502 if (Tmp > (OperandTyBits - TyBits))
4503 return Tmp - (OperandTyBits - TyBits);
4504
4505 return 1;
4506 }
4507
4508 case Instruction::ExtractElement:
4509 // Look through extract element. At the moment we keep this simple and
4510 // skip tracking the specific element. But at least we might find
4511 // information valid for all elements of the vector (for example if vector
4512 // is sign extended, shifted, etc).
4513 return ComputeNumSignBits(U->getOperand(0), Q, Depth + 1);
4514
4515 case Instruction::ShuffleVector: {
4516 // Collect the minimum number of sign bits that are shared by every vector
4517 // element referenced by the shuffle.
4518 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
4519 if (!Shuf) {
4520 // FIXME: Add support for shufflevector constant expressions.
4521 return 1;
4522 }
4523 APInt DemandedLHS, DemandedRHS;
4524 // For undef elements, we don't know anything about the common state of
4525 // the shuffle result.
4526 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
4527 return 1;
4528 Tmp = std::numeric_limits<unsigned>::max();
4529 if (!!DemandedLHS) {
4530 const Value *LHS = Shuf->getOperand(0);
4531 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Q, Depth + 1);
4532 }
4533 // If we don't know anything, early out and try computeKnownBits
4534 // fall-back.
4535 if (Tmp == 1)
4536 break;
4537 if (!!DemandedRHS) {
4538 const Value *RHS = Shuf->getOperand(1);
4539 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Q, Depth + 1);
4540 Tmp = std::min(Tmp, Tmp2);
4541 }
4542 // If we don't know anything, early out and try computeKnownBits
4543 // fall-back.
4544 if (Tmp == 1)
4545 break;
4546 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
4547 return Tmp;
4548 }
4549 case Instruction::Call: {
4550 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
4551 switch (II->getIntrinsicID()) {
4552 default:
4553 break;
4554 case Intrinsic::abs:
4555 Tmp =
4556 ComputeNumSignBits(U->getOperand(0), DemandedElts, Q, Depth + 1);
4557 if (Tmp == 1)
4558 break;
4559
4560 // Absolute value reduces number of sign bits by at most 1.
4561 return Tmp - 1;
4562 case Intrinsic::smin:
4563 case Intrinsic::smax: {
4564 const APInt *CLow, *CHigh;
4565 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
4566 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
4567 }
4568 }
4569 }
4570 }
4571 }
4572 }
4573
4574 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4575 // use this information.
4576
4577 // If we can examine all elements of a vector constant successfully, we're
4578 // done (we can't do any better than that). If not, keep trying.
4579 if (unsigned VecSignBits =
4580 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
4581 return VecSignBits;
4582
4583 KnownBits Known(TyBits);
4584 computeKnownBits(V, DemandedElts, Known, Q, Depth);
4585
4586 // If we know that the sign bit is either zero or one, determine the number of
4587 // identical bits in the top of the input value.
4588 return std::max(FirstAnswer, Known.countMinSignBits());
4589}
4590
4591Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
4592 const TargetLibraryInfo *TLI) {
4593 const Function *F = CB.getCalledFunction();
4594 if (!F)
4595 return Intrinsic::not_intrinsic;
4596
4597 if (F->isIntrinsic())
4598 return F->getIntrinsicID();
4599
4600 // We are going to infer semantics of a library function based on mapping it
4601 // to an LLVM intrinsic. Check that the library function is available from
4602 // this callbase and in this environment.
4603 LibFunc Func;
4604 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
4605 !CB.onlyReadsMemory())
4606 return Intrinsic::not_intrinsic;
4607
4608 switch (Func) {
4609 default:
4610 break;
4611 case LibFunc_sin:
4612 case LibFunc_sinf:
4613 case LibFunc_sinl:
4614 return Intrinsic::sin;
4615 case LibFunc_cos:
4616 case LibFunc_cosf:
4617 case LibFunc_cosl:
4618 return Intrinsic::cos;
4619 case LibFunc_tan:
4620 case LibFunc_tanf:
4621 case LibFunc_tanl:
4622 return Intrinsic::tan;
4623 case LibFunc_asin:
4624 case LibFunc_asinf:
4625 case LibFunc_asinl:
4626 return Intrinsic::asin;
4627 case LibFunc_acos:
4628 case LibFunc_acosf:
4629 case LibFunc_acosl:
4630 return Intrinsic::acos;
4631 case LibFunc_atan:
4632 case LibFunc_atanf:
4633 case LibFunc_atanl:
4634 return Intrinsic::atan;
4635 case LibFunc_atan2:
4636 case LibFunc_atan2f:
4637 case LibFunc_atan2l:
4638 return Intrinsic::atan2;
4639 case LibFunc_sinh:
4640 case LibFunc_sinhf:
4641 case LibFunc_sinhl:
4642 return Intrinsic::sinh;
4643 case LibFunc_cosh:
4644 case LibFunc_coshf:
4645 case LibFunc_coshl:
4646 return Intrinsic::cosh;
4647 case LibFunc_tanh:
4648 case LibFunc_tanhf:
4649 case LibFunc_tanhl:
4650 return Intrinsic::tanh;
4651 case LibFunc_exp:
4652 case LibFunc_expf:
4653 case LibFunc_expl:
4654 return Intrinsic::exp;
4655 case LibFunc_exp2:
4656 case LibFunc_exp2f:
4657 case LibFunc_exp2l:
4658 return Intrinsic::exp2;
4659 case LibFunc_exp10:
4660 case LibFunc_exp10f:
4661 case LibFunc_exp10l:
4662 return Intrinsic::exp10;
4663 case LibFunc_log:
4664 case LibFunc_logf:
4665 case LibFunc_logl:
4666 return Intrinsic::log;
4667 case LibFunc_log10:
4668 case LibFunc_log10f:
4669 case LibFunc_log10l:
4670 return Intrinsic::log10;
4671 case LibFunc_log2:
4672 case LibFunc_log2f:
4673 case LibFunc_log2l:
4674 return Intrinsic::log2;
4675 case LibFunc_fabs:
4676 case LibFunc_fabsf:
4677 case LibFunc_fabsl:
4678 return Intrinsic::fabs;
4679 case LibFunc_fmin:
4680 case LibFunc_fminf:
4681 case LibFunc_fminl:
4682 return Intrinsic::minnum;
4683 case LibFunc_fmax:
4684 case LibFunc_fmaxf:
4685 case LibFunc_fmaxl:
4686 return Intrinsic::maxnum;
4687 case LibFunc_copysign:
4688 case LibFunc_copysignf:
4689 case LibFunc_copysignl:
4690 return Intrinsic::copysign;
4691 case LibFunc_floor:
4692 case LibFunc_floorf:
4693 case LibFunc_floorl:
4694 return Intrinsic::floor;
4695 case LibFunc_ceil:
4696 case LibFunc_ceilf:
4697 case LibFunc_ceill:
4698 return Intrinsic::ceil;
4699 case LibFunc_trunc:
4700 case LibFunc_truncf:
4701 case LibFunc_truncl:
4702 return Intrinsic::trunc;
4703 case LibFunc_rint:
4704 case LibFunc_rintf:
4705 case LibFunc_rintl:
4706 return Intrinsic::rint;
4707 case LibFunc_nearbyint:
4708 case LibFunc_nearbyintf:
4709 case LibFunc_nearbyintl:
4710 return Intrinsic::nearbyint;
4711 case LibFunc_round:
4712 case LibFunc_roundf:
4713 case LibFunc_roundl:
4714 return Intrinsic::round;
4715 case LibFunc_roundeven:
4716 case LibFunc_roundevenf:
4717 case LibFunc_roundevenl:
4718 return Intrinsic::roundeven;
4719 case LibFunc_pow:
4720 case LibFunc_powf:
4721 case LibFunc_powl:
4722 return Intrinsic::pow;
4723 case LibFunc_sqrt:
4724 case LibFunc_sqrtf:
4725 case LibFunc_sqrtl:
4726 return Intrinsic::sqrt;
4727 }
4728
4729 return Intrinsic::not_intrinsic;
4730}
4731
4732/// Given an exploded icmp instruction, return true if the comparison only
4733/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
4734/// the result of the comparison is true when the input value is signed.
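/// For example, for an i8 input, (icmp ugt %x, 127) only checks the sign bit:
/// it is true exactly when the sign bit of %x is set, so TrueIfSigned is true.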
4735bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
4736 bool &TrueIfSigned) {
4737 switch (Pred) {
4738 case ICmpInst::ICMP_SLT: // True if LHS s< 0
4739 TrueIfSigned = true;
4740 return RHS.isZero();
4741 case ICmpInst::ICMP_SLE: // True if LHS s<= -1
4742 TrueIfSigned = true;
4743 return RHS.isAllOnes();
4744 case ICmpInst::ICMP_SGT: // True if LHS s> -1
4745 TrueIfSigned = false;
4746 return RHS.isAllOnes();
4747 case ICmpInst::ICMP_SGE: // True if LHS s>= 0
4748 TrueIfSigned = false;
4749 return RHS.isZero();
4750 case ICmpInst::ICMP_UGT:
4751 // True if LHS u> RHS and RHS == sign-bit-mask - 1
4752 TrueIfSigned = true;
4753 return RHS.isMaxSignedValue();
4754 case ICmpInst::ICMP_UGE:
4755 // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
4756 TrueIfSigned = true;
4757 return RHS.isMinSignedValue();
4758 case ICmpInst::ICMP_ULT:
4759 // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
4760 TrueIfSigned = false;
4761 return RHS.isMinSignedValue();
4762 case ICmpInst::ICMP_ULE:
4763 // True if LHS u<= RHS and RHS == sign-bit-mask - 1
4764 TrueIfSigned = false;
4765 return RHS.isMaxSignedValue();
4766 default:
4767 return false;
4768 }
4769}
4770
4771static void computeKnownFPClassFromCond(const Value *V, Value *Cond,
4772 bool CondIsTrue,
4773 const Instruction *CxtI,
4774 KnownFPClass &KnownFromContext,
4775 unsigned Depth = 0) {
4776 Value *A, *B;
4777 if (Depth < MaxAnalysisRecursionDepth &&
4778 (CondIsTrue ? match(Cond, m_LogicalAnd(m_Value(A), m_Value(B)))
4779 : match(Cond, m_LogicalOr(m_Value(A), m_Value(B))))) {
4780 computeKnownFPClassFromCond(V, A, CondIsTrue, CxtI, KnownFromContext,
4781 Depth + 1);
4782 computeKnownFPClassFromCond(V, B, CondIsTrue, CxtI, KnownFromContext,
4783 Depth + 1);
4784 return;
4785 }
4786 if (Depth < MaxAnalysisRecursionDepth && match(Cond, m_Not(m_Value(A)))) {
4787 computeKnownFPClassFromCond(V, A, !CondIsTrue, CxtI, KnownFromContext,
4788 Depth + 1);
4789 return;
4790 }
4791 CmpPredicate Pred;
4792 Value *LHS;
4793 uint64_t ClassVal = 0;
4794 const APFloat *CRHS;
4795 const APInt *RHS;
4796 if (match(Cond, m_FCmp(Pred, m_Value(LHS), m_APFloat(CRHS)))) {
4797 auto [CmpVal, MaskIfTrue, MaskIfFalse] = fcmpImpliesClass(
4798 Pred, *cast<Instruction>(Cond)->getParent()->getParent(), LHS, *CRHS,
4799 LHS != V);
4800 if (CmpVal == V)
4801 KnownFromContext.knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4802 } else if (match(Cond, m_Intrinsic<Intrinsic::is_fpclass>(
4803 m_Specific(V), m_ConstantInt(ClassVal)))) {
4804 FPClassTest Mask = static_cast<FPClassTest>(ClassVal);
4805 KnownFromContext.knownNot(CondIsTrue ? ~Mask : Mask);
4806 } else if (match(Cond, m_ICmp(Pred, m_ElementWiseBitCast(m_Specific(V)),
4807 m_APInt(RHS)))) {
4808 bool TrueIfSigned;
4809 if (!isSignBitCheck(Pred, *RHS, TrueIfSigned))
4810 return;
4811 if (TrueIfSigned == CondIsTrue)
4812 KnownFromContext.signBitMustBeOne();
4813 else
4814 KnownFromContext.signBitMustBeZero();
4815 }
4816}
4817
4818static KnownFPClass computeKnownFPClassFromContext(const Value *V,
4819 const SimplifyQuery &Q) {
4820 KnownFPClass KnownFromContext;
4821
4822 if (Q.CC && Q.CC->AffectedValues.contains(V))
4823 computeKnownFPClassFromCond(V, Q.CC->Cond, !Q.CC->Invert, Q.CxtI,
4824 KnownFromContext);
4825
4826 if (!Q.CxtI)
4827 return KnownFromContext;
4828
4829 if (Q.DC && Q.DT) {
4830 // Handle dominating conditions.
4831 for (BranchInst *BI : Q.DC->conditionsFor(V)) {
4832 Value *Cond = BI->getCondition();
4833
4834 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
4835 if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
4836 computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/true, Q.CxtI,
4837 KnownFromContext);
4838
4839 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
4840 if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
4841 computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/false, Q.CxtI,
4842 KnownFromContext);
4843 }
4844 }
4845
4846 if (!Q.AC)
4847 return KnownFromContext;
4848
4849 // Try to restrict the floating-point classes based on information from
4850 // assumptions.
4851 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
4852 if (!AssumeVH)
4853 continue;
4854 CallInst *I = cast<CallInst>(AssumeVH);
4855
4856 assert(I->getFunction() == Q.CxtI->getParent()->getParent() &&
4857 "Got assumption for the wrong function!");
4858 assert(I->getIntrinsicID() == Intrinsic::assume &&
4859 "must be an assume intrinsic");
4860
4861 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
4862 continue;
4863
4864 computeKnownFPClassFromCond(V, I->getArgOperand(0),
4865 /*CondIsTrue=*/true, Q.CxtI, KnownFromContext);
4866 }
4867
4868 return KnownFromContext;
4869}
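// Illustrative example (editorial note, not in the original source): a call
// `llvm.assume(%ord)` with `%ord = fcmp ord float %x, 0.0` that is valid at
// the context instruction lets KnownFromContext record that %x is never NaN,
// just as a dominating branch on the same condition would.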
4870
4871 static void adjustKnownFPClassForSelectArm(KnownFPClass &Known, Value *Cond,
4872 Value *Arm, bool Invert,
4873 const SimplifyQuery &SQ,
4874 unsigned Depth) {
4875 computeKnownFPClassFromCond(Arm, Cond,
4876 /*CondIsTrue=*/!Invert, SQ.CxtI, Known,
4877 Depth + 1);
4878 // TODO: Do we need to check isGuaranteedNotToBeUndef, like the KnownBits
4879 // case?
4880}
4881
4882void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4883 FPClassTest InterestedClasses, KnownFPClass &Known,
4884 const SimplifyQuery &Q, unsigned Depth);
4885
4886static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
4887 FPClassTest InterestedClasses,
4888 const SimplifyQuery &Q, unsigned Depth) {
4889 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
4890 APInt DemandedElts =
4891 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
4892 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Q, Depth);
4893}
4894
4895 static void computeKnownFPClassForFPTrunc(const Operator *Op,
4896 const APInt &DemandedElts,
4897 FPClassTest InterestedClasses,
4898 KnownFPClass &Known,
4899 const SimplifyQuery &Q,
4900 unsigned Depth) {
4901 if ((InterestedClasses &
4902 (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone)
4903 return;
4904
4905 KnownFPClass KnownSrc;
4906 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4907 KnownSrc, Q, Depth + 1);
4908
4909 // Sign should be preserved
4910 // TODO: Handle cannot be ordered greater than zero
4911 if (KnownSrc.cannotBeOrderedLessThanZero())
4912 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4913 
4914 Known.propagateNaN(KnownSrc, true);
4915
4916 // Infinity needs a range check.
4917}
4918
4920 switch (IID) {
4921 case Intrinsic::minimum:
4923 case Intrinsic::maximum:
4925 case Intrinsic::minimumnum:
4927 case Intrinsic::maximumnum:
4929 case Intrinsic::minnum:
4931 case Intrinsic::maxnum:
4933 default:
4934 llvm_unreachable("not a floating-point min-max intrinsic");
4935 }
4936}
4937
4938void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4939 FPClassTest InterestedClasses, KnownFPClass &Known,
4940 const SimplifyQuery &Q, unsigned Depth) {
4941 assert(Known.isUnknown() && "should not be called with known information");
4942
4943 if (!DemandedElts) {
4944 // No demanded elts, better to assume we don't know anything.
4945 Known.resetAll();
4946 return;
4947 }
4948
4949 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
4950
4951 if (auto *CFP = dyn_cast<ConstantFP>(V)) {
4952 Known = KnownFPClass(CFP->getValueAPF());
4953 return;
4954 }
4955
4956 if (isa<ConstantAggregateZero>(V)) {
4957 Known.KnownFPClasses = fcPosZero;
4958 Known.SignBit = false;
4959 return;
4960 }
4961
4962 if (isa<PoisonValue>(V)) {
4963 Known.KnownFPClasses = fcNone;
4964 Known.SignBit = false;
4965 return;
4966 }
4967
4968 // Try to handle fixed width vector constants
4969 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4970 const Constant *CV = dyn_cast<Constant>(V);
4971 if (VFVTy && CV) {
4972 Known.KnownFPClasses = fcNone;
4973 bool SignBitAllZero = true;
4974 bool SignBitAllOne = true;
4975
4976 // For vectors, verify that each element is not NaN.
4977 unsigned NumElts = VFVTy->getNumElements();
4978 for (unsigned i = 0; i != NumElts; ++i) {
4979 if (!DemandedElts[i])
4980 continue;
4981
4982 Constant *Elt = CV->getAggregateElement(i);
4983 if (!Elt) {
4984 Known = KnownFPClass();
4985 return;
4986 }
4987 if (isa<PoisonValue>(Elt))
4988 continue;
4989 auto *CElt = dyn_cast<ConstantFP>(Elt);
4990 if (!CElt) {
4991 Known = KnownFPClass();
4992 return;
4993 }
4994
4995 const APFloat &C = CElt->getValueAPF();
4996 Known.KnownFPClasses |= C.classify();
4997 if (C.isNegative())
4998 SignBitAllZero = false;
4999 else
5000 SignBitAllOne = false;
5001 }
5002 if (SignBitAllOne != SignBitAllZero)
5003 Known.SignBit = SignBitAllOne;
5004 return;
5005 }
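// Illustrative example (editorial note, not in the original source): for the
// constant `<2 x float> <float -1.0, float 2.0>` with both lanes demanded,
// the loop above accumulates fcNegNormal | fcPosNormal, and because the
// lanes disagree on the sign bit, Known.SignBit is left unset.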
5006
5007 FPClassTest KnownNotFromFlags = fcNone;
5008 if (const auto *CB = dyn_cast<CallBase>(V))
5009 KnownNotFromFlags |= CB->getRetNoFPClass();
5010 else if (const auto *Arg = dyn_cast<Argument>(V))
5011 KnownNotFromFlags |= Arg->getNoFPClass();
5012
5013 const Operator *Op = dyn_cast<Operator>(V);
5014 if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) {
5015 if (FPOp->hasNoNaNs())
5016 KnownNotFromFlags |= fcNan;
5017 if (FPOp->hasNoInfs())
5018 KnownNotFromFlags |= fcInf;
5019 }
5020
5021 KnownFPClass AssumedClasses = computeKnownFPClassFromContext(V, Q);
5022 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
5023
5024 // We no longer need to find out about these bits from inputs if we can
5025 // assume this from flags/attributes.
5026 InterestedClasses &= ~KnownNotFromFlags;
5027
5028 llvm::scope_exit ClearClassesFromFlags([=, &Known] {
5029 Known.knownNot(KnownNotFromFlags);
5030 if (!Known.SignBit && AssumedClasses.SignBit) {
5031 if (*AssumedClasses.SignBit)
5032 Known.signBitMustBeOne();
5033 else
5034 Known.signBitMustBeZero();
5035 }
5036 });
5037
5038 if (!Op)
5039 return;
5040
5041 // All recursive calls that increase depth must come after this.
5042 if (Depth == MaxAnalysisRecursionDepth)
5043 return;
5044
5045 const unsigned Opc = Op->getOpcode();
5046 switch (Opc) {
5047 case Instruction::FNeg: {
5048 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5049 Known, Q, Depth + 1);
5050 Known.fneg();
5051 break;
5052 }
5053 case Instruction::Select: {
5054 auto ComputeForArm = [&](Value *Arm, bool Invert) {
5055 KnownFPClass Res;
5056 computeKnownFPClass(Arm, DemandedElts, InterestedClasses, Res, Q,
5057 Depth + 1);
5058 adjustKnownFPClassForSelectArm(Res, Op->getOperand(0), Arm, Invert, Q,
5059 Depth);
5060 return Res;
5061 };
5062 // Only known if known in both the LHS and RHS.
5063 Known =
5064 ComputeForArm(Op->getOperand(1), /*Invert=*/false)
5065 .intersectWith(ComputeForArm(Op->getOperand(2), /*Invert=*/true));
5066 break;
5067 }
5068 case Instruction::Call: {
5069 const CallInst *II = cast<CallInst>(Op);
5070 const Intrinsic::ID IID = II->getIntrinsicID();
5071 switch (IID) {
5072 case Intrinsic::fabs: {
5073 if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
5074 // If we only care about the sign bit we don't need to inspect the
5075 // operand.
5076 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
5077 InterestedClasses, Known, Q, Depth + 1);
5078 }
5079
5080 Known.fabs();
5081 break;
5082 }
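// Illustrative example (editorial note, not in the original source): for
// `%y = call float @llvm.fabs.f32(float %x)`, Known.fabs() clears every
// negative class and pins the sign bit to zero even when nothing is known
// about %x itself.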
5083 case Intrinsic::copysign: {
5084 KnownFPClass KnownSign;
5085
5086 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5087 Known, Q, Depth + 1);
5088 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
5089 KnownSign, Q, Depth + 1);
5090 Known.copysign(KnownSign);
5091 break;
5092 }
5093 case Intrinsic::fma:
5094 case Intrinsic::fmuladd: {
5095 if ((InterestedClasses & fcNegative) == fcNone)
5096 break;
5097
5098 if (II->getArgOperand(0) != II->getArgOperand(1) ||
5099 !isGuaranteedNotToBeUndef(II->getArgOperand(0), Q.AC, Q.CxtI, Q.DT,
5100 Depth + 1))
5101 break;
5102
5103 // The multiply cannot be -0 and therefore the add can't be -0
5104 Known.knownNot(fcNegZero);
5105
5106 // x * x + y is non-negative if y is non-negative.
5107 KnownFPClass KnownAddend;
5108 computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses,
5109 KnownAddend, Q, Depth + 1);
5110
5111 if (KnownAddend.cannotBeOrderedLessThanZero())
5112 Known.knownNot(fcNegative);
5113 break;
5114 }
5115 case Intrinsic::sqrt:
5116 case Intrinsic::experimental_constrained_sqrt: {
5117 KnownFPClass KnownSrc;
5118 FPClassTest InterestedSrcs = InterestedClasses;
5119 if (InterestedClasses & fcNan)
5120 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
5121
5122 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
5123 KnownSrc, Q, Depth + 1);
5124
5125 if (KnownSrc.isKnownNeverPosInfinity())
5126 Known.knownNot(fcPosInf);
5127 if (KnownSrc.isKnownNever(fcSNan))
5128 Known.knownNot(fcSNan);
5129
5130 // Any negative value besides -0 returns a nan.
5131 if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
5132 Known.knownNot(fcNan);
5133
5134 // The only negative value that can be returned is -0 for -0 inputs.
5135 Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal);
5136 
5137 // If the input denormal mode could be PreserveSign, a negative
5138 // subnormal input could produce a negative zero output.
5139 const Function *F = II->getFunction();
5140 const fltSemantics &FltSem =
5141 II->getType()->getScalarType()->getFltSemantics();
5142
5143 if (Q.IIQ.hasNoSignedZeros(II) ||
5144 (F &&
5145 KnownSrc.isKnownNeverLogicalNegZero(F->getDenormalMode(FltSem))))
5146 Known.knownNot(fcNegZero);
5147
5148 break;
5149 }
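// Illustrative example (editorial note, not in the original source): if the
// operand of `llvm.sqrt.f32` is already known to be neither NaN nor ordered
// less than zero (for instance a value guarded by a dominating `x >= 0.0`
// check with no-NaNs known), the block above concludes the result is never
// NaN as well.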
5150 case Intrinsic::sin:
5151 case Intrinsic::cos: {
5152 // Return NaN on infinite inputs.
5153 KnownFPClass KnownSrc;
5154 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5155 KnownSrc, Q, Depth + 1);
5156 Known.knownNot(fcInf);
5157 if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
5158 Known.knownNot(fcNan);
5159 break;
5160 }
5161 case Intrinsic::maxnum:
5162 case Intrinsic::minnum:
5163 case Intrinsic::minimum:
5164 case Intrinsic::maximum:
5165 case Intrinsic::minimumnum:
5166 case Intrinsic::maximumnum: {
5167 KnownFPClass KnownLHS, KnownRHS;
5168 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5169 KnownLHS, Q, Depth + 1);
5170 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
5171 KnownRHS, Q, Depth + 1);
5172
5173 const Function *F = II->getFunction();
5174
5176 F ? F->getDenormalMode(
5177 II->getType()->getScalarType()->getFltSemantics())
5179
5180 Known = KnownFPClass::minMaxLike(KnownLHS, KnownRHS, getMinMaxKind(IID),
5181 Mode);
5182 break;
5183 }
5184 case Intrinsic::canonicalize: {
5185 KnownFPClass KnownSrc;
5186 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5187 KnownSrc, Q, Depth + 1);
5188
5189 const Function *F = II->getFunction();
5190 DenormalMode DenormMode =
5191 F ? F->getDenormalMode(
5192 II->getType()->getScalarType()->getFltSemantics())
5194 Known = KnownFPClass::canonicalize(KnownSrc, DenormMode);
5195 break;
5196 }
5197 case Intrinsic::vector_reduce_fmax:
5198 case Intrinsic::vector_reduce_fmin:
5199 case Intrinsic::vector_reduce_fmaximum:
5200 case Intrinsic::vector_reduce_fminimum: {
5201 // reduce min/max will choose an element from one of the vector elements,
5202 // so we can infer any class information that is common to all elements.
5203 Known = computeKnownFPClass(II->getArgOperand(0), II->getFastMathFlags(),
5204 InterestedClasses, Q, Depth + 1);
5205 // Can only propagate sign if output is never NaN.
5206 if (!Known.isKnownNeverNaN())
5207 Known.SignBit.reset();
5208 break;
5209 }
5210 // reverse preserves all characteristics of the input vector's elements.
5211 case Intrinsic::vector_reverse:
5212 Known = computeKnownFPClass(
5213 II->getArgOperand(0), DemandedElts.reverseBits(),
5214 II->getFastMathFlags(), InterestedClasses, Q, Depth + 1);
5215 break;
5216 case Intrinsic::trunc:
5217 case Intrinsic::floor:
5218 case Intrinsic::ceil:
5219 case Intrinsic::rint:
5220 case Intrinsic::nearbyint:
5221 case Intrinsic::round:
5222 case Intrinsic::roundeven: {
5223 KnownFPClass KnownSrc;
5224 FPClassTest InterestedSrcs = InterestedClasses;
5225 if (InterestedSrcs & fcPosFinite)
5226 InterestedSrcs |= fcPosFinite;
5227 if (InterestedSrcs & fcNegFinite)
5228 InterestedSrcs |= fcNegFinite;
5229 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
5230 KnownSrc, Q, Depth + 1);
5231
5232 // Integer results cannot be subnormal.
5233 Known.knownNot(fcSubnormal);
5234
5235 Known.propagateNaN(KnownSrc, true);
5236
5237 // Pass through infinities, except PPC_FP128 is a special case for
5238 // intrinsics other than trunc.
5239 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
5240 if (KnownSrc.isKnownNeverPosInfinity())
5241 Known.knownNot(fcPosInf);
5242 if (KnownSrc.isKnownNeverNegInfinity())
5243 Known.knownNot(fcNegInf);
5244 }
5245
5246 // Rounding a negative value up to 0 produces -0.
5247 if (KnownSrc.isKnownNever(fcPosFinite))
5248 Known.knownNot(fcPosFinite);
5249 if (KnownSrc.isKnownNever(fcNegFinite))
5250 Known.knownNot(fcNegFinite);
5251
5252 break;
5253 }
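// Illustrative example (editorial note, not in the original source):
// `ceil(-0.5)` evaluates to -0.0, so a negative-finite source stays in the
// negative-finite classes after rounding; this is why fcNegFinite is only
// ruled out above when the source is already known never negative-finite.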
5254 case Intrinsic::exp:
5255 case Intrinsic::exp2:
5256 case Intrinsic::exp10:
5257 case Intrinsic::amdgcn_exp2: {
5258 KnownFPClass KnownSrc;
5259 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5260 KnownSrc, Q, Depth + 1);
5261
5262 Known = KnownFPClass::exp(KnownSrc);
5263
5264 Type *EltTy = II->getType()->getScalarType();
5265 if (IID == Intrinsic::amdgcn_exp2 && EltTy->isFloatTy())
5266 Known.knownNot(fcSubnormal);
5267
5268 break;
5269 }
5270 case Intrinsic::fptrunc_round: {
5271 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
5272 Q, Depth);
5273 break;
5274 }
5275 case Intrinsic::log:
5276 case Intrinsic::log10:
5277 case Intrinsic::log2:
5278 case Intrinsic::experimental_constrained_log:
5279 case Intrinsic::experimental_constrained_log10:
5280 case Intrinsic::experimental_constrained_log2:
5281 case Intrinsic::amdgcn_log: {
5282 Type *EltTy = II->getType()->getScalarType();
5283
5284 // log(+inf) -> +inf
5285 // log([+-]0.0) -> -inf
5286 // log(-inf) -> nan
5287 // log(-x) -> nan
5288 if ((InterestedClasses & (fcNan | fcInf)) != fcNone) {
5289 FPClassTest InterestedSrcs = InterestedClasses;
5290 if ((InterestedClasses & fcNegInf) != fcNone)
5291 InterestedSrcs |= fcZero | fcSubnormal;
5292 if ((InterestedClasses & fcNan) != fcNone)
5293 InterestedSrcs |= fcNan | fcNegative;
5294
5295 KnownFPClass KnownSrc;
5296 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
5297 KnownSrc, Q, Depth + 1);
5298
5299 const Function *F = II->getFunction();
5300 DenormalMode Mode = F ? F->getDenormalMode(EltTy->getFltSemantics())
5302 Known = KnownFPClass::log(KnownSrc, Mode);
5303 }
5304
5305 if (IID == Intrinsic::amdgcn_log && EltTy->isFloatTy())
5306 Known.knownNot(fcSubnormal);
5307 break;
5308 }
5309 case Intrinsic::powi: {
5310 if ((InterestedClasses & fcNegative) == fcNone)
5311 break;
5312
5313 const Value *Exp = II->getArgOperand(1);
5314 Type *ExpTy = Exp->getType();
5315 unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth();
5316 KnownBits ExponentKnownBits(BitWidth);
5317 computeKnownBits(Exp, isa<VectorType>(ExpTy) ? DemandedElts : APInt(1, 1),
5318 ExponentKnownBits, Q, Depth + 1);
5319
5320 if (ExponentKnownBits.Zero[0]) { // Is even
5321 Known.knownNot(fcNegative);
5322 break;
5323 }
5324
5325 // Given that exp is an integer, here are the
5326 // ways that pow can return a negative value:
5327 //
5328 // pow(-x, exp) --> negative if exp is odd and x is negative.
5329 // pow(-0, exp) --> -inf if exp is negative odd.
5330 // pow(-0, exp) --> -0 if exp is positive odd.
5331 // pow(-inf, exp) --> -0 if exp is negative odd.
5332 // pow(-inf, exp) --> -inf if exp is positive odd.
5333 KnownFPClass KnownSrc;
5334 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
5335 KnownSrc, Q, Depth + 1);
5336 if (KnownSrc.isKnownNever(fcNegative))
5337 Known.knownNot(fcNegative);
5338 break;
5339 }
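// Illustrative example (editorial note, not in the original source): for
// `llvm.powi.f32.i32(float %x, i32 4)` the low bit of the exponent is known
// zero, so the even-exponent early exit above marks the result as never
// negative without inspecting %x at all.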
5340 case Intrinsic::ldexp: {
5341 KnownFPClass KnownSrc;
5342 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5343 KnownSrc, Q, Depth + 1);
5344 Known.propagateNaN(KnownSrc, /*PropagateSign=*/true);
5345
5346 // Sign is preserved, but underflows may produce zeroes.
5347 if (KnownSrc.isKnownNever(fcNegative))
5348 Known.knownNot(fcNegative);
5349 else if (KnownSrc.cannotBeOrderedLessThanZero())
5350 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5351 
5352 if (KnownSrc.isKnownNever(fcPositive))
5353 Known.knownNot(fcPositive);
5354 else if (KnownSrc.cannotBeOrderedGreaterThanZero())
5355 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5356 
5357 // Can refine inf/zero handling based on the exponent operand.
5358 const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
5359 if ((InterestedClasses & ExpInfoMask) == fcNone)
5360 break;
5361 if ((KnownSrc.KnownFPClasses & ExpInfoMask) == fcNone)
5362 break;
5363
5364 const fltSemantics &Flt =
5365 II->getType()->getScalarType()->getFltSemantics();
5366 unsigned Precision = APFloat::semanticsPrecision(Flt);
5367 const Value *ExpArg = II->getArgOperand(1);
5368 ConstantRange ExpRange = computeConstantRange(
5369 ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth + 1);
5370
5371 const int MantissaBits = Precision - 1;
5372 if (ExpRange.getSignedMin().sge(static_cast<int64_t>(MantissaBits)))
5373 Known.knownNot(fcSubnormal);
5374
5375 const Function *F = II->getFunction();
5376 const APInt *ConstVal = ExpRange.getSingleElement();
5377 const fltSemantics &FltSem =
5378 II->getType()->getScalarType()->getFltSemantics();
5379 if (ConstVal && ConstVal->isZero()) {
5380 // ldexp(x, 0) -> x, so propagate everything.
5381 Known.propagateCanonicalizingSrc(KnownSrc, F->getDenormalMode(FltSem));
5382 } else if (ExpRange.isAllNegative()) {
5383 // If we know the power is <= 0, can't introduce inf
5384 if (KnownSrc.isKnownNeverPosInfinity())
5385 Known.knownNot(fcPosInf);
5386 if (KnownSrc.isKnownNeverNegInfinity())
5387 Known.knownNot(fcNegInf);
5388 } else if (ExpRange.isAllNonNegative()) {
5389 // If we know the power is >= 0, can't introduce subnormal or zero
5390 if (KnownSrc.isKnownNeverPosSubnormal())
5391 Known.knownNot(fcPosSubnormal);
5392 if (KnownSrc.isKnownNeverNegSubnormal())
5393 Known.knownNot(fcNegSubnormal);
5394 if (F &&
5395 KnownSrc.isKnownNeverLogicalPosZero(F->getDenormalMode(FltSem)))
5396 Known.knownNot(fcPosZero);
5397 if (F &&
5398 KnownSrc.isKnownNeverLogicalNegZero(F->getDenormalMode(FltSem)))
5399 Known.knownNot(fcNegZero);
5400 }
5401
5402 break;
5403 }
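// Illustrative worked example (editorial note, not in the original source):
// for f32 the precision is 24, so MantissaBits is 23. If the exponent
// operand of ldexp is known to be >= 23, even the smallest subnormal
// (2^-149) is scaled up to at least 2^-126, the smallest normal, so the
// result can be marked as never subnormal.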
5404 case Intrinsic::arithmetic_fence: {
5405 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5406 Known, Q, Depth + 1);
5407 break;
5408 }
5409 case Intrinsic::experimental_constrained_sitofp:
5410 case Intrinsic::experimental_constrained_uitofp:
5411 // Cannot produce nan
5412 Known.knownNot(fcNan);
5413
5414 // sitofp and uitofp turn into +0.0 for zero.
5415 Known.knownNot(fcNegZero);
5416
5417 // Integers cannot be subnormal
5418 Known.knownNot(fcSubnormal);
5419
5420 if (IID == Intrinsic::experimental_constrained_uitofp)
5421 Known.signBitMustBeZero();
5422
5423 // TODO: Copy inf handling from instructions
5424 break;
5425 case Intrinsic::amdgcn_rcp: {
5426 KnownFPClass KnownSrc;
5427 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5428 KnownSrc, Q, Depth + 1);
5429
5430 Known.propagateNaN(KnownSrc);
5431
5432 Type *EltTy = II->getType()->getScalarType();
5433
5434 // f32 denormal always flushed.
5435 if (EltTy->isFloatTy()) {
5436 Known.knownNot(fcSubnormal);
5437 KnownSrc.knownNot(fcSubnormal);
5438 }
5439
5440 if (KnownSrc.isKnownNever(fcNegative))
5441 Known.knownNot(fcNegative);
5442 if (KnownSrc.isKnownNever(fcPositive))
5443 Known.knownNot(fcPositive);
5444
5445 if (const Function *F = II->getFunction()) {
5446 DenormalMode Mode = F->getDenormalMode(EltTy->getFltSemantics());
5447 if (KnownSrc.isKnownNeverLogicalPosZero(Mode))
5448 Known.knownNot(fcPosInf);
5449 if (KnownSrc.isKnownNeverLogicalNegZero(Mode))
5450 Known.knownNot(fcNegInf);
5451 }
5452
5453 break;
5454 }
5455 case Intrinsic::amdgcn_rsq: {
5456 KnownFPClass KnownSrc;
5457 // The only negative value that can be returned is -inf for -0 inputs.
5459
5460 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5461 KnownSrc, Q, Depth + 1);
5462
5463 // Negative -> nan
5464 if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
5465 Known.knownNot(fcNan);
5466 else if (KnownSrc.isKnownNever(fcSNan))
5467 Known.knownNot(fcSNan);
5468
5469 // +inf -> +0
5470 if (KnownSrc.isKnownNeverPosInfinity())
5471 Known.knownNot(fcPosZero);
5472
5473 Type *EltTy = II->getType()->getScalarType();
5474
5475 // f32 denormal always flushed.
5476 if (EltTy->isFloatTy())
5477 Known.knownNot(fcPosSubnormal);
5478
5479 if (const Function *F = II->getFunction()) {
5480 DenormalMode Mode = F->getDenormalMode(EltTy->getFltSemantics());
5481
5482 // -0 -> -inf
5483 if (KnownSrc.isKnownNeverLogicalNegZero(Mode))
5484 Known.knownNot(fcNegInf);
5485
5486 // +0 -> +inf
5487 if (KnownSrc.isKnownNeverLogicalPosZero(Mode))
5488 Known.knownNot(fcPosInf);
5489 }
5490
5491 break;
5492 }
5493 default:
5494 break;
5495 }
5496
5497 break;
5498 }
5499 case Instruction::FAdd:
5500 case Instruction::FSub: {
5501 KnownFPClass KnownLHS, KnownRHS;
5502 bool WantNegative =
5503 Op->getOpcode() == Instruction::FAdd &&
5504 (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone;
5505 bool WantNaN = (InterestedClasses & fcNan) != fcNone;
5506 bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;
5507
5508 if (!WantNaN && !WantNegative && !WantNegZero)
5509 break;
5510
5511 FPClassTest InterestedSrcs = InterestedClasses;
5512 if (WantNegative)
5513 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
5514 if (InterestedClasses & fcNan)
5515 InterestedSrcs |= fcInf;
5516 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs,
5517 KnownRHS, Q, Depth + 1);
5518
5519 // Special case fadd x, x, which is the canonical form of fmul x, 2.
5520 bool SelfAdd = Op->getOperand(0) == Op->getOperand(1) &&
5521 isGuaranteedNotToBeUndef(Op->getOperand(0), Q.AC, Q.CxtI,
5522 Q.DT, Depth + 1);
5523 if (SelfAdd)
5524 KnownLHS = KnownRHS;
5525
5526 if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
5527 (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) ||
5528 WantNegZero || Opc == Instruction::FSub) {
5529
5530 if (!SelfAdd) {
5531 // RHS is canonically cheaper to compute. Skip inspecting the LHS if
5532 // there's no point.
5533 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs,
5534 KnownLHS, Q, Depth + 1);
5535 }
5536
5537 // Adding positive and negative infinity produces NaN.
5538 // TODO: Check sign of infinities.
5539 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5540 (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
5541 Known.knownNot(fcNan);
5542
5543 // FIXME: Context function should always be passed in separately
5544 const Function *F = cast<Instruction>(Op)->getFunction();
5545
5546 if (Op->getOpcode() == Instruction::FAdd) {
5547 if (KnownLHS.cannotBeOrderedLessThanZero() &&
5548 KnownRHS.cannotBeOrderedLessThanZero())
5549 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5550 if (KnownLHS.cannotBeOrderedGreaterThanZero() &&
5551 KnownRHS.cannotBeOrderedGreaterThanZero())
5552 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5553 
5554 if (!F)
5555 break;
5556
5557 const fltSemantics &FltSem =
5558 Op->getType()->getScalarType()->getFltSemantics();
5559 DenormalMode Mode = F->getDenormalMode(FltSem);
5560
5561 // Doubling 0 will give the same 0.
5562 if (SelfAdd && KnownRHS.isKnownNeverLogicalPosZero(Mode) &&
5563 (Mode.Output == DenormalMode::IEEE ||
5565 Known.knownNot(fcPosZero);
5566
5567 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
5568 if ((KnownLHS.isKnownNeverLogicalNegZero(Mode) ||
5569 KnownRHS.isKnownNeverLogicalNegZero(Mode)) &&
5570 // Make sure output negative denormal can't flush to -0
5571 (Mode.Output == DenormalMode::IEEE ||
5573 Known.knownNot(fcNegZero);
5574 } else {
5575 if (!F)
5576 break;
5577
5578 const fltSemantics &FltSem =
5579 Op->getType()->getScalarType()->getFltSemantics();
5580 DenormalMode Mode = F->getDenormalMode(FltSem);
5581
5582 // Only fsub -0, +0 can return -0
5583 if ((KnownLHS.isKnownNeverLogicalNegZero(Mode) ||
5584 KnownRHS.isKnownNeverLogicalPosZero(Mode)) &&
5585 // Make sure output negative denormal can't flush to -0
5586 (Mode.Output == DenormalMode::IEEE ||
5588 Known.knownNot(fcNegZero);
5589 }
5590 }
5591
5592 break;
5593 }
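// Illustrative example (editorial note, not in the original source): for
// `%s = fadd float %a, 0.0` the RHS is trivially never a logical -0.0, and
// under the default rounding mode IEEE addition only produces -0.0 when both
// addends are -0.0, so the block above can mark the sum as never fcNegZero
// (provided negative denormal outputs cannot be flushed to -0).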
5594 case Instruction::FMul: {
5595 const Function *F = cast<Instruction>(Op)->getFunction();
5597 F ? F->getDenormalMode(
5598 Op->getType()->getScalarType()->getFltSemantics())
5600
5601 // X * X is always non-negative or a NaN.
5602 if (Op->getOperand(0) == Op->getOperand(1)) {
5603 KnownFPClass KnownSrc;
5604 computeKnownFPClass(Op->getOperand(0), DemandedElts, fcAllFlags, KnownSrc,
5605 Q, Depth + 1);
5606 Known = KnownFPClass::square(KnownSrc, Mode);
5607 break;
5608 }
5609
5610 KnownFPClass KnownLHS, KnownRHS;
5611
5612 bool CannotBeSubnormal = false;
5613 const APFloat *CRHS;
5614 if (match(Op->getOperand(1), m_APFloat(CRHS))) {
5615 // Match denormal scaling pattern, similar to the case in ldexp. If the
5616 // constant's exponent is sufficiently large, the result cannot be
5617 // subnormal.
5618
5619 // TODO: Should do general ConstantFPRange analysis.
5620 const fltSemantics &Flt =
5621 Op->getType()->getScalarType()->getFltSemantics();
5622 unsigned Precision = APFloat::semanticsPrecision(Flt);
5623 const int MantissaBits = Precision - 1;
5624
5625 int MinKnownExponent = ilogb(*CRHS);
5626 if (MinKnownExponent >= MantissaBits)
5627 CannotBeSubnormal = true;
5628
5629 KnownRHS = KnownFPClass(*CRHS);
5630 } else {
5631 computeKnownFPClass(Op->getOperand(1), DemandedElts, fcAllFlags, KnownRHS,
5632 Q, Depth + 1);
5633 }
5634
5635 computeKnownFPClass(Op->getOperand(0), DemandedElts, fcAllFlags, KnownLHS,
5636 Q, Depth + 1);
5637
5638 Known = KnownFPClass::fmul(KnownLHS, KnownRHS, Mode);
5639 if (CannotBeSubnormal)
5640 Known.knownNot(fcSubnormal);
5641 break;
5642 }
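// Illustrative example (editorial note, not in the original source): for
// `%p = fmul float %x, %x` the product is either a NaN or has a clear sign
// bit (even (-0.0) * (-0.0) is +0.0), which is the fact the square() path
// above is meant to encode.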
5643 case Instruction::FDiv:
5644 case Instruction::FRem: {
5645 if (Op->getOperand(0) == Op->getOperand(1) &&
5646 isGuaranteedNotToBeUndef(Op->getOperand(0), Q.AC, Q.CxtI, Q.DT)) {
5647 // TODO: Could filter out snan if we inspect the operand
5648 if (Op->getOpcode() == Instruction::FDiv) {
5649 // X / X is always exactly 1.0 or a NaN.
5650 Known.KnownFPClasses = fcNan | fcPosNormal;
5651 } else {
5652 // X % X is always exactly [+-]0.0 or a NaN.
5653 Known.KnownFPClasses = fcNan | fcZero;
5654 }
5655
5656 break;
5657 }
5658
5659 const bool WantNan = (InterestedClasses & fcNan) != fcNone;
5660 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
5661 const bool WantPositive =
5662 Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone;
5663 if (!WantNan && !WantNegative && !WantPositive)
5664 break;
5665
5666 KnownFPClass KnownLHS, KnownRHS;
5667
5668 computeKnownFPClass(Op->getOperand(1), DemandedElts,
5669 fcNan | fcInf | fcZero | fcNegative, KnownRHS, Q,
5670 Depth + 1);
5671
5672 bool KnowSomethingUseful =
5673 KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
5674
5675 if (KnowSomethingUseful || WantPositive) {
5676 const FPClassTest InterestedLHS =
5677 WantPositive ? fcAllFlags
5679
5680 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5681 InterestedClasses & InterestedLHS, KnownLHS, Q,
5682 Depth + 1);
5683 }
5684
5685 const Function *F = cast<Instruction>(Op)->getFunction();
5686 const fltSemantics &FltSem =
5687 Op->getType()->getScalarType()->getFltSemantics();
5688
5689 if (Op->getOpcode() == Instruction::FDiv) {
5690 // Only 0/0, Inf/Inf produce NaN.
5691 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5692 (KnownLHS.isKnownNeverInfinity() ||
5693 KnownRHS.isKnownNeverInfinity()) &&
5694 ((F &&
5695 KnownLHS.isKnownNeverLogicalZero(F->getDenormalMode(FltSem))) ||
5696 (F &&
5697 KnownRHS.isKnownNeverLogicalZero(F->getDenormalMode(FltSem))))) {
5698 Known.knownNot(fcNan);
5699 }
5700
5701 // X / -0.0 is -Inf (or NaN).
5702 // +X / +X is +X
5703 if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative))
5704 Known.knownNot(fcNegative);
5705 } else {
5706 // Inf REM x and x REM 0 produce NaN.
5707 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5708 KnownLHS.isKnownNeverInfinity() && F &&
5709 KnownRHS.isKnownNeverLogicalZero(F->getDenormalMode(FltSem))) {
5710 Known.knownNot(fcNan);
5711 }
5712
5713 // The sign for frem is the same as the first operand.
5714 if (KnownLHS.cannotBeOrderedLessThanZero())
5715 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5716 if (KnownLHS.cannotBeOrderedGreaterThanZero())
5717 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5718 
5719 // See if we can be more aggressive about the sign of 0.
5720 if (KnownLHS.isKnownNever(fcNegative))
5721 Known.knownNot(fcNegative);
5722 if (KnownLHS.isKnownNever(fcPositive))
5723 Known.knownNot(fcPositive);
5724 }
5725
5726 break;
5727 }
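// Illustrative example (editorial note, not in the original source): for
// `%q = fdiv float %x, %x` with %x known not to be undef or poison, the
// special case above narrows the result to "exactly 1.0 or a NaN"; the NaN
// arises from 0/0, inf/inf, or a NaN input.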
5728 case Instruction::FPExt: {
5729 // Infinity, nan and zero propagate from source.
5730 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5731 Known, Q, Depth + 1);
5732
5733 const fltSemantics &DstTy =
5734 Op->getType()->getScalarType()->getFltSemantics();
5735 const fltSemantics &SrcTy =
5736 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5737
5738 // All subnormal inputs should be in the normal range in the result type.
5739 if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy)) {
5740 if (Known.KnownFPClasses & fcPosSubnormal)
5741 Known.KnownFPClasses |= fcPosNormal;
5742 if (Known.KnownFPClasses & fcNegSubnormal)
5743 Known.KnownFPClasses |= fcNegNormal;
5744 Known.knownNot(fcSubnormal);
5745 }
5746
5747 // Sign bit of a nan isn't guaranteed.
5748 if (!Known.isKnownNeverNaN())
5749 Known.SignBit = std::nullopt;
5750 break;
5751 }
5752 case Instruction::FPTrunc: {
5753 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known, Q,
5754 Depth);
5755 break;
5756 }
5757 case Instruction::SIToFP:
5758 case Instruction::UIToFP: {
5759 // Cannot produce nan
5760 Known.knownNot(fcNan);
5761
5762 // Integers cannot be subnormal
5763 Known.knownNot(fcSubnormal);
5764
5765 // sitofp and uitofp turn into +0.0 for zero.
5766 Known.knownNot(fcNegZero);
5767 if (Op->getOpcode() == Instruction::UIToFP)
5768 Known.signBitMustBeZero();
5769
5770 if (InterestedClasses & fcInf) {
5771 // Get width of largest magnitude integer (remove a bit if signed).
5772 // This still works for a signed minimum value because the largest FP
5773 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
5774 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits();
5775 if (Op->getOpcode() == Instruction::SIToFP)
5776 --IntSize;
5777
5778 // If the exponent of the largest finite FP value can hold the largest
5779 // integer, the result of the cast must be finite.
5780 Type *FPTy = Op->getType()->getScalarType();
5781 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize)
5782 Known.knownNot(fcInf);
5783 }
5784
5785 break;
5786 }
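// Illustrative worked example (editorial note, not in the original source):
// for `uitofp i32 %n to float`, IntSize is 32 and the largest finite float
// has exponent 127, so 127 >= 32 proves the conversion can never produce an
// infinity; for `uitofp i128 %n to half` the largest exponent is only 15, so
// fcInf cannot be ruled out.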
5787 case Instruction::ExtractElement: {
5788 // Look through extract element. If the index is non-constant or
5789 // out-of-range demand all elements, otherwise just the extracted element.
5790 const Value *Vec = Op->getOperand(0);
5791
5792 APInt DemandedVecElts;
5793 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
5794 unsigned NumElts = VecTy->getNumElements();
5795 DemandedVecElts = APInt::getAllOnes(NumElts);
5796 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(1));
5797 if (CIdx && CIdx->getValue().ult(NumElts))
5798 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
5799 } else {
5800 DemandedVecElts = APInt(1, 1);
5801 }
5802
5803 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
5804 Q, Depth + 1);
5805 }
5806 case Instruction::InsertElement: {
5807 if (isa<ScalableVectorType>(Op->getType()))
5808 return;
5809
5810 const Value *Vec = Op->getOperand(0);
5811 const Value *Elt = Op->getOperand(1);
5812 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2));
5813 unsigned NumElts = DemandedElts.getBitWidth();
5814 APInt DemandedVecElts = DemandedElts;
5815 bool NeedsElt = true;
5816 // If we know the index we are inserting to, clear it from Vec check.
5817 if (CIdx && CIdx->getValue().ult(NumElts)) {
5818 DemandedVecElts.clearBit(CIdx->getZExtValue());
5819 NeedsElt = DemandedElts[CIdx->getZExtValue()];
5820 }
5821
5822 // Do we demand the inserted element?
5823 if (NeedsElt) {
5824 computeKnownFPClass(Elt, Known, InterestedClasses, Q, Depth + 1);
5825 // If we don't know any bits, early out.
5826 if (Known.isUnknown())
5827 break;
5828 } else {
5829 Known.KnownFPClasses = fcNone;
5830 }
5831
5832 // Do we need any more elements from Vec?
5833 if (!DemandedVecElts.isZero()) {
5834 KnownFPClass Known2;
5835 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2, Q,
5836 Depth + 1);
5837 Known |= Known2;
5838 }
5839
5840 break;
5841 }
5842 case Instruction::ShuffleVector: {
5843 // Handle vector splat idiom
5844 if (Value *Splat = getSplatValue(V)) {
5845 computeKnownFPClass(Splat, Known, InterestedClasses, Q, Depth + 1);
5846 break;
5847 }
5848
5849 // For undef elements, we don't know anything about the common state of
5850 // the shuffle result.
5851 APInt DemandedLHS, DemandedRHS;
5852 auto *Shuf = dyn_cast<ShuffleVectorInst>(Op);
5853 if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
5854 return;
5855
5856 if (!!DemandedLHS) {
5857 const Value *LHS = Shuf->getOperand(0);
5858 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known, Q,
5859 Depth + 1);
5860
5861 // If we don't know any bits, early out.
5862 if (Known.isUnknown())
5863 break;
5864 } else {
5865 Known.KnownFPClasses = fcNone;
5866 }
5867
5868 if (!!DemandedRHS) {
5869 KnownFPClass Known2;
5870 const Value *RHS = Shuf->getOperand(1);
5871 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2, Q,
5872 Depth + 1);
5873 Known |= Known2;
5874 }
5875
5876 break;
5877 }
5878 case Instruction::ExtractValue: {
5879 const ExtractValueInst *Extract = cast<ExtractValueInst>(Op);
5880 ArrayRef<unsigned> Indices = Extract->getIndices();
5881 const Value *Src = Extract->getAggregateOperand();
5882 if (isa<StructType>(Src->getType()) && Indices.size() == 1 &&
5883 Indices[0] == 0) {
5884 if (const auto *II = dyn_cast<IntrinsicInst>(Src)) {
5885 switch (II->getIntrinsicID()) {
5886 case Intrinsic::frexp: {
5887 Known.knownNot(fcSubnormal);
5888
5889 KnownFPClass KnownSrc;
5890 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
5891 InterestedClasses, KnownSrc, Q, Depth + 1);
5892
5893 const Function *F = cast<Instruction>(Op)->getFunction();
5894 const fltSemantics &FltSem =
5895 Op->getType()->getScalarType()->getFltSemantics();
5896
5897 if (KnownSrc.isKnownNever(fcNegative))
5898 Known.knownNot(fcNegative);
5899 else {
5900 if (F &&
5901 KnownSrc.isKnownNeverLogicalNegZero(F->getDenormalMode(FltSem)))
5902 Known.knownNot(fcNegZero);
5903 if (KnownSrc.isKnownNever(fcNegInf))
5904 Known.knownNot(fcNegInf);
5905 }
5906
5907 if (KnownSrc.isKnownNever(fcPositive))
5908 Known.knownNot(fcPositive);
5909 else {
5910 if (F &&
5911 KnownSrc.isKnownNeverLogicalPosZero(F->getDenormalMode(FltSem)))
5912 Known.knownNot(fcPosZero);
5913 if (KnownSrc.isKnownNever(fcPosInf))
5914 Known.knownNot(fcPosInf);
5915 }
5916
5917 Known.propagateNaN(KnownSrc);
5918 return;
5919 }
5920 default:
5921 break;
5922 }
5923 }
5924 }
5925
5926 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Q,
5927 Depth + 1);
5928 break;
5929 }
5930 case Instruction::PHI: {
5931 const PHINode *P = cast<PHINode>(Op);
5932 // Unreachable blocks may have zero-operand PHI nodes.
5933 if (P->getNumIncomingValues() == 0)
5934 break;
5935
5936 // Otherwise take the unions of the known bit sets of the operands,
5937 // taking conservative care to avoid excessive recursion.
5938 const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2;
5939
5940 if (Depth < PhiRecursionLimit) {
5941 // Skip if every incoming value references ourselves.
5942 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
5943 break;
5944
5945 bool First = true;
5946
5947 for (const Use &U : P->operands()) {
5948 Value *IncValue;
5949 Instruction *CxtI;
5950 breakSelfRecursivePHI(&U, P, IncValue, CxtI);
5951 // Skip direct self references.
5952 if (IncValue == P)
5953 continue;
5954
5955 KnownFPClass KnownSrc;
5956 // Recurse, but cap the recursion to two levels, because we don't want
5957 // to waste time spinning around in loops. We need at least depth 2 to
5958 // detect known sign bits.
5959 computeKnownFPClass(IncValue, DemandedElts, InterestedClasses, KnownSrc,
5961 PhiRecursionLimit);
5962
5963 if (First) {
5964 Known = KnownSrc;
5965 First = false;
5966 } else {
5967 Known |= KnownSrc;
5968 }
5969
5970 if (Known.KnownFPClasses == fcAllFlags)
5971 break;
5972 }
5973 }
5974
5975 break;
5976 }
5977 case Instruction::BitCast: {
5978 const Value *Src;
5979 if (!match(Op, m_ElementWiseBitCast(m_Value(Src))) ||
5980 !Src->getType()->isIntOrIntVectorTy())
5981 break;
5982
5983 const Type *Ty = Op->getType()->getScalarType();
5984 KnownBits Bits(Ty->getScalarSizeInBits());
5985 computeKnownBits(Src, DemandedElts, Bits, Q, Depth + 1);
5986
5987 // Transfer information from the sign bit.
5988 if (Bits.isNonNegative())
5989 Known.signBitMustBeZero();
5990 else if (Bits.isNegative())
5991 Known.signBitMustBeOne();
5992
5993 if (Ty->isIEEELikeFPTy()) {
5994 // IEEE floats are NaN when all bits of the exponent plus at least one of
5995 // the fraction bits are 1. This means:
5996 // - If we assume unknown bits are 0 and the value is NaN, it will
5997 // always be NaN
5998 // - If we assume unknown bits are 1 and the value is not NaN, it can
5999 // never be NaN
6000 // Note: These properties do not hold for the x86_fp80 format.
6001 if (APFloat(Ty->getFltSemantics(), Bits.One).isNaN())
6002 Known.KnownFPClasses = fcNan;
6003 else if (!APFloat(Ty->getFltSemantics(), ~Bits.Zero).isNaN())
6004 Known.knownNot(fcNan);
6005
6006 // Build KnownBits representing Inf and check if it must be equal or
6007 // unequal to this value.
6008 auto InfKB = KnownBits::makeConstant(
6009 APFloat::getInf(Ty->getFltSemantics()).bitcastToAPInt());
6010 InfKB.Zero.clearSignBit();
6011 if (const auto InfResult = KnownBits::eq(Bits, InfKB)) {
6012 assert(!InfResult.value());
6013 Known.knownNot(fcInf);
6014 } else if (Bits == InfKB) {
6015 Known.KnownFPClasses = fcInf;
6016 }
6017
6018 // Build KnownBits representing Zero and check if it must be equal or
6019 // unequal to this value.
6020 auto ZeroKB = KnownBits::makeConstant(
6021 APFloat::getZero(Ty->getFltSemantics()).bitcastToAPInt());
6022 ZeroKB.Zero.clearSignBit();
6023 if (const auto ZeroResult = KnownBits::eq(Bits, ZeroKB)) {
6024 assert(!ZeroResult.value());
6025 Known.knownNot(fcZero);
6026 } else if (Bits == ZeroKB) {
6027 Known.KnownFPClasses = fcZero;
6028 }
6029 }
6030
6031 break;
6032 }
6033 default:
6034 break;
6035 }
6036}
6037
6039 const APInt &DemandedElts,
6040 FPClassTest InterestedClasses,
6041 const SimplifyQuery &SQ,
6042 unsigned Depth) {
6043 KnownFPClass KnownClasses;
6044 ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, SQ,
6045 Depth);
6046 return KnownClasses;
6047}
6048
6050 FPClassTest InterestedClasses,
6051 const SimplifyQuery &SQ,
6052 unsigned Depth) {
6053 KnownFPClass Known;
6054 ::computeKnownFPClass(V, Known, InterestedClasses, SQ, Depth);
6055 return Known;
6056}
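// Illustrative usage sketch (editorial note, not in the original source; the
// variable names are placeholders): a caller that only needs to know whether
// a value can be NaN might use the wrapper above as
//   KnownFPClass K = computeKnownFPClass(V, fcNan, SQ, /*Depth=*/0);
//   bool NeverNaN = K.isKnownNeverNaN();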
6057
6059 const Value *V, const DataLayout &DL, FPClassTest InterestedClasses,
6060 const TargetLibraryInfo *TLI, AssumptionCache *AC, const Instruction *CxtI,
6061 const DominatorTree *DT, bool UseInstrInfo, unsigned Depth) {
6062 return computeKnownFPClass(V, InterestedClasses,
6063 SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo),
6064 Depth);
6065}
6066
6068llvm::computeKnownFPClass(const Value *V, const APInt &DemandedElts,
6069 FastMathFlags FMF, FPClassTest InterestedClasses,
6070 const SimplifyQuery &SQ, unsigned Depth) {
6071 if (FMF.noNaNs())
6072 InterestedClasses &= ~fcNan;
6073 if (FMF.noInfs())
6074 InterestedClasses &= ~fcInf;
6075
6076 KnownFPClass Result =
6077 computeKnownFPClass(V, DemandedElts, InterestedClasses, SQ, Depth);
6078
6079 if (FMF.noNaNs())
6080 Result.KnownFPClasses &= ~fcNan;
6081 if (FMF.noInfs())
6082 Result.KnownFPClasses &= ~fcInf;
6083 return Result;
6084}
6085
6087 FPClassTest InterestedClasses,
6088 const SimplifyQuery &SQ,
6089 unsigned Depth) {
6090 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
6091 APInt DemandedElts =
6092 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
6093 return computeKnownFPClass(V, DemandedElts, FMF, InterestedClasses, SQ,
6094 Depth);
6095}
6096
6098 unsigned Depth) {
6100 return Known.isKnownNeverNegZero();
6101}
6102
6109
6111 unsigned Depth) {
6113 return Known.isKnownNeverInfinity();
6114}
6115
6116/// Return true if the floating-point value can never contain a NaN or infinity.
6118 unsigned Depth) {
6120 return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
6121}
6122
6123/// Return true if the floating-point scalar value is not a NaN or if the
6124/// floating-point vector value has no NaN elements. Return false if a value
6125/// could ever be NaN.
6127 unsigned Depth) {
6129 return Known.isKnownNeverNaN();
6130}
6131
6132/// Return false if we can prove that the specified FP value's sign bit is 0.
6133/// Return true if we can prove that the specified FP value's sign bit is 1.
6134/// Otherwise return std::nullopt.
6135std::optional<bool> llvm::computeKnownFPSignBit(const Value *V,
6136 const SimplifyQuery &SQ,
6137 unsigned Depth) {
6139 return Known.SignBit;
6140}
6141
6143 auto *User = cast<Instruction>(U.getUser());
6144 if (auto *FPOp = dyn_cast<FPMathOperator>(User)) {
6145 if (FPOp->hasNoSignedZeros())
6146 return true;
6147 }
6148
6149 switch (User->getOpcode()) {
6150 case Instruction::FPToSI:
6151 case Instruction::FPToUI:
6152 return true;
6153 case Instruction::FCmp:
6154 // fcmp treats both positive and negative zero as equal.
6155 return true;
6156 case Instruction::Call:
6157 if (auto *II = dyn_cast<IntrinsicInst>(User)) {
6158 switch (II->getIntrinsicID()) {
6159 case Intrinsic::fabs:
6160 return true;
6161 case Intrinsic::copysign:
6162 return U.getOperandNo() == 0;
6163 case Intrinsic::is_fpclass:
6164 case Intrinsic::vp_is_fpclass: {
6165 auto Test =
6166 static_cast<FPClassTest>(
6167 cast<ConstantInt>(II->getArgOperand(1))->getZExtValue()) &
6170 }
6171 default:
6172 return false;
6173 }
6174 }
6175 return false;
6176 default:
6177 return false;
6178 }
6179}
6180
6182 auto *User = cast<Instruction>(U.getUser());
6183 if (auto *FPOp = dyn_cast<FPMathOperator>(User)) {
6184 if (FPOp->hasNoNaNs())
6185 return true;
6186 }
6187
6188 switch (User->getOpcode()) {
6189 case Instruction::FPToSI:
6190 case Instruction::FPToUI:
6191 return true;
6192 // Proper FP math operations ignore the sign bit of NaN.
6193 case Instruction::FAdd:
6194 case Instruction::FSub:
6195 case Instruction::FMul:
6196 case Instruction::FDiv:
6197 case Instruction::FRem:
6198 case Instruction::FPTrunc:
6199 case Instruction::FPExt:
6200 case Instruction::FCmp:
6201 return true;
6202 // Bitwise FP operations should preserve the sign bit of NaN.
6203 case Instruction::FNeg:
6204 case Instruction::Select:
6205 case Instruction::PHI:
6206 return false;
6207 case Instruction::Ret:
6208 return User->getFunction()->getAttributes().getRetNoFPClass() &
6210 case Instruction::Call:
6211 case Instruction::Invoke: {
6212 if (auto *II = dyn_cast<IntrinsicInst>(User)) {
6213 switch (II->getIntrinsicID()) {
6214 case Intrinsic::fabs:
6215 return true;
6216 case Intrinsic::copysign:
6217 return U.getOperandNo() == 0;
6218 // Other proper FP math intrinsics ignore the sign bit of NaN.
6219 case Intrinsic::maxnum:
6220 case Intrinsic::minnum:
6221 case Intrinsic::maximum:
6222 case Intrinsic::minimum:
6223 case Intrinsic::maximumnum:
6224 case Intrinsic::minimumnum:
6225 case Intrinsic::canonicalize:
6226 case Intrinsic::fma:
6227 case Intrinsic::fmuladd:
6228 case Intrinsic::sqrt:
6229 case Intrinsic::pow:
6230 case Intrinsic::powi:
6231 case Intrinsic::fptoui_sat:
6232 case Intrinsic::fptosi_sat:
6233 case Intrinsic::is_fpclass:
6234 case Intrinsic::vp_is_fpclass:
6235 return true;
6236 default:
6237 return false;
6238 }
6239 }
6240
6241 FPClassTest NoFPClass =
6242 cast<CallBase>(User)->getParamNoFPClass(U.getOperandNo());
6243 return NoFPClass & FPClassTest::fcNan;
6244 }
6245 default:
6246 return false;
6247 }
6248}
6249
6250 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
6251 
6252 // All byte-wide stores are splatable, even of arbitrary variables.
6253 if (V->getType()->isIntegerTy(8))
6254 return V;
6255
6256 LLVMContext &Ctx = V->getContext();
6257
6258 // Undef bytes are a "don't care" value.
6259 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
6260 if (isa<UndefValue>(V))
6261 return UndefInt8;
6262
6263 // Return poison for zero-sized type.
6264 if (DL.getTypeStoreSize(V->getType()).isZero())
6265 return PoisonValue::get(Type::getInt8Ty(Ctx));
6266
6267 Constant *C = dyn_cast<Constant>(V);
6268 if (!C) {
6269 // Conceptually, we could handle things like:
6270 // %a = zext i8 %X to i16
6271 // %b = shl i16 %a, 8
6272 // %c = or i16 %a, %b
6273 // but until there is an example that actually needs this, it doesn't seem
6274 // worth worrying about.
6275 return nullptr;
6276 }
6277
6278 // Handle 'null' ConstantAggregateZero etc.
6279 if (C->isNullValue())
6280 return Constant::getNullValue(Type::getInt8Ty(Ctx));
6281 
6282 // Constant floating-point values can be handled as integer values if the
6283 // corresponding integer value is "byteable". An important case is 0.0.
6284 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
6285 Type *Ty = nullptr;
6286 if (CFP->getType()->isHalfTy())
6287 Ty = Type::getInt16Ty(Ctx);
6288 else if (CFP->getType()->isFloatTy())
6289 Ty = Type::getInt32Ty(Ctx);
6290 else if (CFP->getType()->isDoubleTy())
6291 Ty = Type::getInt64Ty(Ctx);
6292 // Don't handle long double formats, which have strange constraints.
6293 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
6294 : nullptr;
6295 }
6296
6297 // We can handle constant integers that are multiple of 8 bits.
6298 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
6299 if (CI->getBitWidth() % 8 == 0) {
6300 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
6301 if (!CI->getValue().isSplat(8))
6302 return nullptr;
6303 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
6304 }
6305 }
6306
6307 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
6308 if (CE->getOpcode() == Instruction::IntToPtr) {
6309 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
6310 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
6312 CE->getOperand(0), Type::getIntNTy(Ctx, BitWidth), false, DL))
6313 return isBytewiseValue(Op, DL);
6314 }
6315 }
6316 }
6317
6318 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
6319 if (LHS == RHS)
6320 return LHS;
6321 if (!LHS || !RHS)
6322 return nullptr;
6323 if (LHS == UndefInt8)
6324 return RHS;
6325 if (RHS == UndefInt8)
6326 return LHS;
6327 return nullptr;
6328 };
6329
6330 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
6331 Value *Val = UndefInt8;
6332 for (uint64_t I = 0, E = CA->getNumElements(); I != E; ++I)
6333 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
6334 return nullptr;
6335 return Val;
6336 }
6337
6339 Value *Val = UndefInt8;
6340 for (Value *Op : C->operands())
6341 if (!(Val = Merge(Val, isBytewiseValue(Op, DL))))
6342 return nullptr;
6343 return Val;
6344 }
6345
6346 // Don't try to handle the handful of other constants.
6347 return nullptr;
6348}
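// Illustrative examples (editorial note, not in the original source):
// isBytewiseValue returns an i8 with value 0xFF for the i32 constant -1, an
// i8 0 for any zeroinitializer, and nullptr for an i16 constant 0x0102,
// whose two bytes differ.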
6349
6350// This is the recursive version of BuildSubAggregate. It takes a few different
6351// arguments. Idxs is the index within the nested struct From that we are
6352// looking at now (which is of type IndexedType). IdxSkip is the number of
6353// indices from Idxs that should be left out when inserting into the resulting
6354// struct. To is the result struct built so far, new insertvalue instructions
6355// build on that.
6356static Value *BuildSubAggregate(Value *From, Value *To, Type *IndexedType,
6358 unsigned IdxSkip,
6359 BasicBlock::iterator InsertBefore) {
6360 StructType *STy = dyn_cast<StructType>(IndexedType);
6361 if (STy) {
6362 // Save the original To argument so we can modify it
6363 Value *OrigTo = To;
6364 // General case, the type indexed by Idxs is a struct
6365 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
6366 // Process each struct element recursively
6367 Idxs.push_back(i);
6368 Value *PrevTo = To;
6369 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
6370 InsertBefore);
6371 Idxs.pop_back();
6372 if (!To) {
6373 // Couldn't find any inserted value for this index? Cleanup
6374 while (PrevTo != OrigTo) {
6376 PrevTo = Del->getAggregateOperand();
6377 Del->eraseFromParent();
6378 }
6379 // Stop processing elements
6380 break;
6381 }
6382 }
6383 // If we successfully found a value for each of our subaggregates
6384 if (To)
6385 return To;
6386 }
6387 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
6388 // the struct's elements had a value that was inserted directly. In the latter
6389 // case, perhaps we can't determine each of the subelements individually, but
6390 // we might be able to find the complete struct somewhere.
6391
6392 // Find the value that is at that particular spot
6393 Value *V = FindInsertedValue(From, Idxs);
6394
6395 if (!V)
6396 return nullptr;
6397
6398 // Insert the value in the new (sub) aggregate
6399 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp",
6400 InsertBefore);
6401}
6402
6403// This helper takes a nested struct and extracts a part of it (which is again a
6404// struct) into a new value. For example, given the struct:
6405// { a, { b, { c, d }, e } }
6406// and the indices "1, 1" this returns
6407// { c, d }.
6408//
6409// It does this by inserting an insertvalue for each element in the resulting
6410// struct, as opposed to just inserting a single struct. This will only work if
6411 // each of the elements of the substruct is known (i.e., inserted into From by an
6412// insertvalue instruction somewhere).
6413//
6414// All inserted insertvalue instructions are inserted before InsertBefore
6416 BasicBlock::iterator InsertBefore) {
6417 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
6418 idx_range);
6419 Value *To = PoisonValue::get(IndexedType);
6420 SmallVector<unsigned, 10> Idxs(idx_range);
6421 unsigned IdxSkip = Idxs.size();
6422
6423 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
6424}
6425
6426/// Given an aggregate and a sequence of indices, see if the scalar value
6427/// indexed is already around as a register, for example if it was inserted
6428/// directly into the aggregate.
6429///
6430/// If InsertBefore is not null, this function will duplicate (modified)
6431/// insertvalues when a part of a nested struct is extracted.
6432Value *
6434 std::optional<BasicBlock::iterator> InsertBefore) {
6435 // Nothing to index? Just return V then (this is useful at the end of our
6436 // recursion).
6437 if (idx_range.empty())
6438 return V;
6439 // We have indices, so V should have an indexable type.
6440 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6441 "Not looking at a struct or array?");
6442 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
6443 "Invalid indices for type?");
6444
6445 if (Constant *C = dyn_cast<Constant>(V)) {
6446 C = C->getAggregateElement(idx_range[0]);
6447 if (!C) return nullptr;
6448 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
6449 }
6450
6452 // Loop the indices for the insertvalue instruction in parallel with the
6453 // requested indices
6454 const unsigned *req_idx = idx_range.begin();
6455 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
6456 i != e; ++i, ++req_idx) {
6457 if (req_idx == idx_range.end()) {
6458 // We can't handle this without inserting insertvalues
6459 if (!InsertBefore)
6460 return nullptr;
6461
6462 // The requested index identifies a part of a nested aggregate. Handle
6463 // this specially. For example,
6464 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
6465 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
6466 // %C = extractvalue {i32, { i32, i32 } } %B, 1
6467 // This can be changed into
6468 // %A = insertvalue {i32, i32 } undef, i32 10, 0
6469 // %C = insertvalue {i32, i32 } %A, i32 11, 1
6470 // which allows the unused 0,0 element from the nested struct to be
6471 // removed.
6472 return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
6473 *InsertBefore);
6474 }
6475
6476 // This insertvalue inserts something other than what we are looking for.
6477 // See if the (aggregate) value inserted into has the value we are
6478 // looking for, then.
6479 if (*req_idx != *i)
6480 return FindInsertedValue(I->getAggregateOperand(), idx_range,
6481 InsertBefore);
6482 }
6483 // If we end up here, the indices of the insertvalue match with those
6484 // requested (though possibly only partially). Now we recursively look at
6485 // the inserted value, passing any remaining indices.
6486 return FindInsertedValue(I->getInsertedValueOperand(),
6487 ArrayRef(req_idx, idx_range.end()), InsertBefore);
6488 }
6489
6491 // If we're extracting a value from an aggregate that was extracted from
6492 // something else, we can extract from that something else directly instead.
6493 // However, we will need to chain I's indices with the requested indices.
6494
6495 // Calculate the number of indices required
6496 unsigned size = I->getNumIndices() + idx_range.size();
6497 // Allocate some space to put the new indices in
6499 Idxs.reserve(size);
6500 // Add indices from the extract value instruction
6501 Idxs.append(I->idx_begin(), I->idx_end());
6502
6503 // Add requested indices
6504 Idxs.append(idx_range.begin(), idx_range.end());
6505
6506 assert(Idxs.size() == size
6507 && "Number of indices added not correct?");
6508
6509 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
6510 }
6511 // Otherwise, we don't know (such as, extracting from a function return value
6512 // or load instruction)
6513 return nullptr;
6514}
6515
6516// If V refers to an initialized global constant, set Slice either to
6517// its initializer if the size of its elements equals ElementSize, or,
6518 // for ElementSize == 8, to its representation as an array of unsigned
6519// char. Return true on success.
6520// Offset is in the unit "nr of ElementSize sized elements".
6523 unsigned ElementSize, uint64_t Offset) {
6524 assert(V && "V should not be null.");
6525 assert((ElementSize % 8) == 0 &&
6526 "ElementSize expected to be a multiple of the size of a byte.");
6527 unsigned ElementSizeInBytes = ElementSize / 8;
6528
6529 // Drill down into the pointer expression V, ignoring any intervening
6530 // casts, and determine the identity of the object it references along
6531 // with the cumulative byte offset into it.
6532 const GlobalVariable *GV =
6534 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
6535 // Fail if V is not based on constant global object.
6536 return false;
6537
6538 const DataLayout &DL = GV->getDataLayout();
6539 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
6540
6541 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
6542 /*AllowNonInbounds*/ true))
6543 // Fail if a constant offset could not be determined.
6544 return false;
6545
6546 uint64_t StartIdx = Off.getLimitedValue();
6547 if (StartIdx == UINT64_MAX)
6548 // Fail if the constant offset is excessive.
6549 return false;
6550
6551 // Off/StartIdx is in bytes, so we need to convert it to a number of
6552 // elements. Simply bail out if that isn't possible.
6553 if ((StartIdx % ElementSizeInBytes) != 0)
6554 return false;
6555
6556 Offset += StartIdx / ElementSizeInBytes;
6557 ConstantDataArray *Array = nullptr;
6558 ArrayType *ArrayTy = nullptr;
6559
6560 if (GV->getInitializer()->isNullValue()) {
6561 Type *GVTy = GV->getValueType();
6562 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
6563 uint64_t Length = SizeInBytes / ElementSizeInBytes;
6564
6565 Slice.Array = nullptr;
6566 Slice.Offset = 0;
6567 // Return an empty Slice for undersized constants to let callers
6568 // transform even undefined library calls into simpler, well-defined
6569 // expressions. This is preferable to making the calls although it
6570 // prevents sanitizers from detecting such calls.
6571 Slice.Length = Length < Offset ? 0 : Length - Offset;
6572 return true;
6573 }
6574
6575 auto *Init = const_cast<Constant *>(GV->getInitializer());
6576 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
6577 Type *InitElTy = ArrayInit->getElementType();
6578 if (InitElTy->isIntegerTy(ElementSize)) {
6579 // If Init is an initializer for an array of the expected type
6580 // and size, use it as is.
6581 Array = ArrayInit;
6582 ArrayTy = ArrayInit->getType();
6583 }
6584 }
6585
6586 if (!Array) {
6587 if (ElementSize != 8)
6588 // TODO: Handle conversions to larger integral types.
6589 return false;
6590
6591 // Otherwise extract the portion of the initializer starting
6592 // at Offset as an array of bytes, and reset Offset.
6594 if (!Init)
6595 return false;
6596
6597 Offset = 0;
6599 ArrayTy = dyn_cast<ArrayType>(Init->getType());
6600 }
6601
6602 uint64_t NumElts = ArrayTy->getArrayNumElements();
6603 if (Offset > NumElts)
6604 return false;
6605
6606 Slice.Array = Array;
6607 Slice.Offset = Offset;
6608 Slice.Length = NumElts - Offset;
6609 return true;
6610}
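
// Worked illustration (hypothetical IR, not part of this file): given
//   @g = constant [8 x i8] c"ab\00cdef\00"
// a query on `getelementptr inbounds ([8 x i8], ptr @g, i64 0, i64 3)` with
// ElementSize == 8 drills down to @g with a byte offset of 3, so the
// resulting slice is { Array = @g's initializer, Offset = 3, Length = 5 },
// i.e. the bytes 'c', 'd', 'e', 'f', '\0'.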
6611
6612/// Extract bytes from the initializer of the constant array V, which need
6613/// not be a nul-terminated string. On success, store the bytes in Str and
6614/// return true. When TrimAtNul is set, Str will contain only the bytes up
6615/// to but not including the first nul. Return false on failure.
6617 bool TrimAtNul) {
6619 if (!getConstantDataArrayInfo(V, Slice, 8))
6620 return false;
6621
6622 if (Slice.Array == nullptr) {
6623 if (TrimAtNul) {
6624 // Return a nul-terminated string even for an empty Slice. This is
6625 // safe because all existing SimplifyLibcalls callers require string
6626 // arguments and the behavior of the functions they fold is undefined
6627 // otherwise. Folding the calls this way is preferable to making
6628 // the undefined library calls, even though it prevents sanitizers
6629 // from reporting such calls.
6630 Str = StringRef();
6631 return true;
6632 }
6633 if (Slice.Length == 1) {
6634 Str = StringRef("", 1);
6635 return true;
6636 }
6637 // We cannot instantiate a StringRef as we do not have an appropriate string
6638 // of 0s at hand.
6639 return false;
6640 }
6641
6642 // Start out with the entire array in the StringRef.
6643 Str = Slice.Array->getAsString();
6644 // Skip over 'offset' bytes.
6645 Str = Str.substr(Slice.Offset);
6646
6647 if (TrimAtNul) {
6648 // Trim off the \0 and anything after it. If the array is not nul
6649 // terminated, we just return the whole end of string. The client may know
6650 // some other way that the string is length-bound.
6651 Str = Str.substr(0, Str.find('\0'));
6652 }
6653 return true;
6654}
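
// Worked illustration (hypothetical IR): for
//   @s = constant [6 x i8] c"hi\00yo\00"
// the routine above sets Str to "hi" when TrimAtNul is true (everything from
// the first nul onward is dropped), and to all six bytes "hi\0yo\0" when
// TrimAtNul is false.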
6655
6656// These next two are very similar to the above, but also look through PHI
6657// nodes.
6658// TODO: See if we can integrate these two together.
6659
6660/// If we can compute the length of the string pointed to by
6661/// the specified pointer, return 'len+1'. If we can't, return 0.
6662static uint64_t GetStringLengthH(const Value *V,
6663 SmallPtrSetImpl<const PHINode *> &PHIs,
6664 unsigned CharSize) {
6665 // Look through noop bitcast instructions.
6666 V = V->stripPointerCasts();
6667
6668 // If this is a PHI node, there are two cases: either we have already seen it
6669 // or we haven't.
6670 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
6671 if (!PHIs.insert(PN).second)
6672 return ~0ULL; // already in the set.
6673
6674 // If it was new, see if all the input strings are the same length.
6675 uint64_t LenSoFar = ~0ULL;
6676 for (Value *IncValue : PN->incoming_values()) {
6677 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
6678 if (Len == 0) return 0; // Unknown length -> unknown.
6679
6680 if (Len == ~0ULL) continue;
6681
6682 if (Len != LenSoFar && LenSoFar != ~0ULL)
6683 return 0; // Disagree -> unknown.
6684 LenSoFar = Len;
6685 }
6686
6687 // Success, all agree.
6688 return LenSoFar;
6689 }
6690
6691 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
6692 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
6693 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
6694 if (Len1 == 0) return 0;
6695 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
6696 if (Len2 == 0) return 0;
6697 if (Len1 == ~0ULL) return Len2;
6698 if (Len2 == ~0ULL) return Len1;
6699 if (Len1 != Len2) return 0;
6700 return Len1;
6701 }
6702
6703 // Otherwise, see if we can read the string.
6705 if (!getConstantDataArrayInfo(V, Slice, CharSize))
6706 return 0;
6707
6708 if (Slice.Array == nullptr)
6709 // Zeroinitializer (including an empty one).
6710 return 1;
6711
6712 // Search for the first nul character. Return a conservative result even
6713 // when there is no nul. This is safe since otherwise the string function
6714 // being folded (such as strlen) is undefined, and folding can be
6715 // preferable to making the undefined library call.
6716 unsigned NullIndex = 0;
6717 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
6718 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
6719 break;
6720 }
6721
6722 return NullIndex + 1;
6723}
6724
6725/// If we can compute the length of the string pointed to by
6726/// the specified pointer, return 'len+1'. If we can't, return 0.
6727uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
6728 if (!V->getType()->isPointerTy())
6729 return 0;
6730
6732 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
6733 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
6734 // return the length of an empty string (1).
6735 return Len == ~0ULL ? 1 : Len;
6736}
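
// Worked illustration (hypothetical IR): for a pointer to
//   @msg = constant [6 x i8] c"hello\00"
// GetStringLength returns 6, i.e. strlen("hello") + 1; if the length cannot
// be determined it returns 0.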
6737
6738const Value *
6740 bool MustPreserveNullness) {
6741 assert(Call &&
6742 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6743 if (const Value *RV = Call->getReturnedArgOperand())
6744 return RV;
6745 // This can be used only as an aliasing property.
6747 Call, MustPreserveNullness))
6748 return Call->getArgOperand(0);
6749 return nullptr;
6750}
6751
6753 const CallBase *Call, bool MustPreserveNullness) {
6754 switch (Call->getIntrinsicID()) {
6755 case Intrinsic::launder_invariant_group:
6756 case Intrinsic::strip_invariant_group:
6757 case Intrinsic::aarch64_irg:
6758 case Intrinsic::aarch64_tagp:
6759 // The amdgcn_make_buffer_rsrc function does not alter the address of the
6760 // input pointer (and thus preserves null-ness for the purposes of escape
6761 // analysis, which is where the MustPreserveNullness flag comes into play).
6762 // However, it will not necessarily map ptr addrspace(N) null to ptr
6763 // addrspace(8) null, aka the "null descriptor", which has "all loads return
6764 // 0, all stores are dropped" semantics. Given the context of this intrinsic
6765 // list, no one should be relying on such a strict interpretation of
6766 // MustPreserveNullness (and, at time of writing, they are not), but we
6767 // document this fact out of an abundance of caution.
6768 case Intrinsic::amdgcn_make_buffer_rsrc:
6769 return true;
6770 case Intrinsic::ptrmask:
6771 return !MustPreserveNullness;
6772 case Intrinsic::threadlocal_address:
6773 // The underlying variable changes with the thread ID. The thread ID may change
6774 // at coroutine suspend points.
6775 return !Call->getParent()->getParent()->isPresplitCoroutine();
6776 default:
6777 return false;
6778 }
6779}
6780
6781/// \p PN defines a loop-variant pointer to an object. Check if the
6782/// previous iteration of the loop was referring to the same object as \p PN.
6784 const LoopInfo *LI) {
6785 // Find the loop-defined value.
6786 Loop *L = LI->getLoopFor(PN->getParent());
6787 if (PN->getNumIncomingValues() != 2)
6788 return true;
6789
6790 // Find the value from previous iteration.
6791 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
6792 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
6793 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
6794 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
6795 return true;
6796
6797 // If a new pointer is loaded in the loop, the pointer references a different
6798 // object in every iteration. E.g.:
6799 // for (i)
6800 // int *p = a[i];
6801 // ...
6802 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
6803 if (!L->isLoopInvariant(Load->getPointerOperand()))
6804 return false;
6805 return true;
6806}
6807
6808const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
6809 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
6810 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
6811 const Value *PtrOp = GEP->getPointerOperand();
6812 if (!PtrOp->getType()->isPointerTy()) // Only handle scalar pointer base.
6813 return V;
6814 V = PtrOp;
6815 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
6816 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
6817 Value *NewV = cast<Operator>(V)->getOperand(0);
6818 if (!NewV->getType()->isPointerTy())
6819 return V;
6820 V = NewV;
6821 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
6822 if (GA->isInterposable())
6823 return V;
6824 V = GA->getAliasee();
6825 } else {
6826 if (auto *PHI = dyn_cast<PHINode>(V)) {
6827 // Look through single-arg phi nodes created by LCSSA.
6828 if (PHI->getNumIncomingValues() == 1) {
6829 V = PHI->getIncomingValue(0);
6830 continue;
6831 }
6832 } else if (auto *Call = dyn_cast<CallBase>(V)) {
6833 // CaptureTracking can know about special capturing properties of some
6834 // intrinsics like launder.invariant.group that can't be expressed with
6835 // the attributes, such as returning a pointer that aliases an argument.
6836 // Because some analyses may assume that a nocapture pointer is not
6837 // returned from a special intrinsic (since the function would have to
6838 // be marked with the returned attribute), it is crucial to use this
6839 // function because it must stay in sync with CaptureTracking. Not using
6840 // it may cause miscompilations where two aliasing pointers are assumed
6841 // not to alias.
6842 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
6843 V = RP;
6844 continue;
6845 }
6846 }
6847
6848 return V;
6849 }
6850 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
6851 }
6852 return V;
6853}
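
// Worked illustration (hypothetical IR): in
//   %buf = alloca [16 x i8]
//   %p   = getelementptr inbounds [16 x i8], ptr %buf, i64 0, i64 4
//   %q   = addrspacecast ptr %p to ptr addrspace(1)
// getUnderlyingObject(%q) strips the addrspacecast, then the GEP, and
// returns the alloca %buf.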
6854
6857 const LoopInfo *LI, unsigned MaxLookup) {
6860 Worklist.push_back(V);
6861 do {
6862 const Value *P = Worklist.pop_back_val();
6863 P = getUnderlyingObject(P, MaxLookup);
6864
6865 if (!Visited.insert(P).second)
6866 continue;
6867
6868 if (auto *SI = dyn_cast<SelectInst>(P)) {
6869 Worklist.push_back(SI->getTrueValue());
6870 Worklist.push_back(SI->getFalseValue());
6871 continue;
6872 }
6873
6874 if (auto *PN = dyn_cast<PHINode>(P)) {
6875 // If this PHI changes the underlying object in every iteration of the
6876 // loop, don't look through it. Consider:
6877 // int **A;
6878 // for (i) {
6879 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
6880 // Curr = A[i];
6881 // *Prev, *Curr;
6882 //
6883 // Prev is tracking Curr one iteration behind so they refer to different
6884 // underlying objects.
6885 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
6887 append_range(Worklist, PN->incoming_values());
6888 else
6889 Objects.push_back(P);
6890 continue;
6891 }
6892
6893 Objects.push_back(P);
6894 } while (!Worklist.empty());
6895}
6896
6898 const unsigned MaxVisited = 8;
6899
6902 Worklist.push_back(V);
6903 const Value *Object = nullptr;
6904 // Used as fallback if we can't find a common underlying object through
6905 // recursion.
6906 bool First = true;
6907 const Value *FirstObject = getUnderlyingObject(V);
6908 do {
6909 const Value *P = Worklist.pop_back_val();
6910 P = First ? FirstObject : getUnderlyingObject(P);
6911 First = false;
6912
6913 if (!Visited.insert(P).second)
6914 continue;
6915
6916 if (Visited.size() == MaxVisited)
6917 return FirstObject;
6918
6919 if (auto *SI = dyn_cast<SelectInst>(P)) {
6920 Worklist.push_back(SI->getTrueValue());
6921 Worklist.push_back(SI->getFalseValue());
6922 continue;
6923 }
6924
6925 if (auto *PN = dyn_cast<PHINode>(P)) {
6926 append_range(Worklist, PN->incoming_values());
6927 continue;
6928 }
6929
6930 if (!Object)
6931 Object = P;
6932 else if (Object != P)
6933 return FirstObject;
6934 } while (!Worklist.empty());
6935
6936 return Object ? Object : FirstObject;
6937}
6938
6939/// This is the function that does the work of looking through basic
6940/// ptrtoint+arithmetic+inttoptr sequences.
6941static const Value *getUnderlyingObjectFromInt(const Value *V) {
6942 do {
6943 if (const Operator *U = dyn_cast<Operator>(V)) {
6944 // If we find a ptrtoint, we can transfer control back to the
6945 // regular getUnderlyingObjectFromInt.
6946 if (U->getOpcode() == Instruction::PtrToInt)
6947 return U->getOperand(0);
6948 // If we find an add of a constant, a multiplied value, or a phi, it's
6949 // likely that the other operand will lead us to the base
6950 // object. We don't have to worry about the case where the
6951 // object address is somehow being computed by the multiply,
6952 // because our callers only care when the result is an
6953 // identifiable object.
6954 if (U->getOpcode() != Instruction::Add ||
6955 (!isa<ConstantInt>(U->getOperand(1)) &&
6956 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
6957 !isa<PHINode>(U->getOperand(1))))
6958 return V;
6959 V = U->getOperand(0);
6960 } else {
6961 return V;
6962 }
6963 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
6964 } while (true);
6965}
6966
6967/// This is a wrapper around getUnderlyingObjects and adds support for basic
6968/// ptrtoint+arithmetic+inttoptr sequences.
6969/// It returns false if an unidentified object is found in getUnderlyingObjects.
6971 SmallVectorImpl<Value *> &Objects) {
6973 SmallVector<const Value *, 4> Working(1, V);
6974 do {
6975 V = Working.pop_back_val();
6976
6978 getUnderlyingObjects(V, Objs);
6979
6980 for (const Value *V : Objs) {
6981 if (!Visited.insert(V).second)
6982 continue;
6983 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
6984 const Value *O =
6985 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
6986 if (O->getType()->isPointerTy()) {
6987 Working.push_back(O);
6988 continue;
6989 }
6990 }
6991 // If getUnderlyingObjects fails to find an identifiable object,
6992 // getUnderlyingObjectsForCodeGen also fails for safety.
6993 if (!isIdentifiedObject(V)) {
6994 Objects.clear();
6995 return false;
6996 }
6997 Objects.push_back(const_cast<Value *>(V));
6998 }
6999 } while (!Working.empty());
7000 return true;
7001}
7002
7004 AllocaInst *Result = nullptr;
7006 SmallVector<Value *, 4> Worklist;
7007
7008 auto AddWork = [&](Value *V) {
7009 if (Visited.insert(V).second)
7010 Worklist.push_back(V);
7011 };
7012
7013 AddWork(V);
7014 do {
7015 V = Worklist.pop_back_val();
7016 assert(Visited.count(V));
7017
7018 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
7019 if (Result && Result != AI)
7020 return nullptr;
7021 Result = AI;
7022 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
7023 AddWork(CI->getOperand(0));
7024 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
7025 for (Value *IncValue : PN->incoming_values())
7026 AddWork(IncValue);
7027 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
7028 AddWork(SI->getTrueValue());
7029 AddWork(SI->getFalseValue());
7031 if (OffsetZero && !GEP->hasAllZeroIndices())
7032 return nullptr;
7033 AddWork(GEP->getPointerOperand());
7034 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
7035 Value *Returned = CB->getReturnedArgOperand();
7036 if (Returned)
7037 AddWork(Returned);
7038 else
7039 return nullptr;
7040 } else {
7041 return nullptr;
7042 }
7043 } while (!Worklist.empty());
7044
7045 return Result;
7046}
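
// Worked illustration (hypothetical IR) for the alloca-finding walk above:
//   %a = alloca i32
//   %p = getelementptr inbounds i32, ptr %a, i64 0
//   %s = select i1 %c, ptr %p, ptr %a
// the walk resolves %s to the unique alloca %a (the all-zero-index GEP is
// accepted even with OffsetZero == true); if the two select arms reached
// different allocas, the result would be nullptr.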
7047
7049 const Value *V, bool AllowLifetime, bool AllowDroppable) {
7050 for (const User *U : V->users()) {
7052 if (!II)
7053 return false;
7054
7055 if (AllowLifetime && II->isLifetimeStartOrEnd())
7056 continue;
7057
7058 if (AllowDroppable && II->isDroppable())
7059 continue;
7060
7061 return false;
7062 }
7063 return true;
7064}
7065
7068 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
7069}
7072 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
7073}
7074
7076 if (auto *II = dyn_cast<IntrinsicInst>(I))
7077 return isTriviallyVectorizable(II->getIntrinsicID());
7078 auto *Shuffle = dyn_cast<ShuffleVectorInst>(I);
7079 return (!Shuffle || Shuffle->isSelect()) &&
7081}
7082
7084 const Instruction *Inst, const Instruction *CtxI, AssumptionCache *AC,
7085 const DominatorTree *DT, const TargetLibraryInfo *TLI, bool UseVariableInfo,
7086 bool IgnoreUBImplyingAttrs) {
7087 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
7088 AC, DT, TLI, UseVariableInfo,
7089 IgnoreUBImplyingAttrs);
7090}
7091
7093 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
7094 AssumptionCache *AC, const DominatorTree *DT, const TargetLibraryInfo *TLI,
7095 bool UseVariableInfo, bool IgnoreUBImplyingAttrs) {
7096#ifndef NDEBUG
7097 if (Inst->getOpcode() != Opcode) {
7098 // Check that the operands are actually compatible with the Opcode override.
7099 auto hasEqualReturnAndLeadingOperandTypes =
7100 [](const Instruction *Inst, unsigned NumLeadingOperands) {
7101 if (Inst->getNumOperands() < NumLeadingOperands)
7102 return false;
7103 const Type *ExpectedType = Inst->getType();
7104 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
7105 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
7106 return false;
7107 return true;
7108 };
7110 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
7111 assert(!Instruction::isUnaryOp(Opcode) ||
7112 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
7113 }
7114#endif
7115
7116 switch (Opcode) {
7117 default:
7118 return true;
7119 case Instruction::UDiv:
7120 case Instruction::URem: {
7121 // x / y is undefined if y == 0.
7122 const APInt *V;
7123 if (match(Inst->getOperand(1), m_APInt(V)))
7124 return *V != 0;
7125 return false;
7126 }
7127 case Instruction::SDiv:
7128 case Instruction::SRem: {
7129 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
7130 const APInt *Numerator, *Denominator;
7131 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
7132 return false;
7133 // We cannot hoist this division if the denominator is 0.
7134 if (*Denominator == 0)
7135 return false;
7136 // It's safe to hoist if the denominator is not 0 or -1.
7137 if (!Denominator->isAllOnes())
7138 return true;
7139 // At this point we know that the denominator is -1. It is safe to hoist as
7140 // long as we know that the numerator is not INT_MIN.
7141 if (match(Inst->getOperand(0), m_APInt(Numerator)))
7142 return !Numerator->isMinSignedValue();
7143 // The numerator *might* be MinSignedValue.
7144 return false;
7145 }
7146 case Instruction::Load: {
7147 if (!UseVariableInfo)
7148 return false;
7149
7150 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
7151 if (!LI)
7152 return false;
7153 if (mustSuppressSpeculation(*LI))
7154 return false;
7155 const DataLayout &DL = LI->getDataLayout();
7157 LI->getType(), LI->getAlign(), DL,
7158 CtxI, AC, DT, TLI);
7159 }
7160 case Instruction::Call: {
7161 auto *CI = dyn_cast<const CallInst>(Inst);
7162 if (!CI)
7163 return false;
7164 const Function *Callee = CI->getCalledFunction();
7165
7166 // The called function could have undefined behavior or side-effects, even
7167 // if marked readnone nounwind.
7168 if (!Callee || !Callee->isSpeculatable())
7169 return false;
7170 // Since the operands may be changed after hoisting, undefined behavior may
7171 // be triggered by some UB-implying attributes.
7172 return IgnoreUBImplyingAttrs || !CI->hasUBImplyingAttrs();
7173 }
7174 case Instruction::VAArg:
7175 case Instruction::Alloca:
7176 case Instruction::Invoke:
7177 case Instruction::CallBr:
7178 case Instruction::PHI:
7179 case Instruction::Store:
7180 case Instruction::Ret:
7181 case Instruction::Br:
7182 case Instruction::IndirectBr:
7183 case Instruction::Switch:
7184 case Instruction::Unreachable:
7185 case Instruction::Fence:
7186 case Instruction::AtomicRMW:
7187 case Instruction::AtomicCmpXchg:
7188 case Instruction::LandingPad:
7189 case Instruction::Resume:
7190 case Instruction::CatchSwitch:
7191 case Instruction::CatchPad:
7192 case Instruction::CatchRet:
7193 case Instruction::CleanupPad:
7194 case Instruction::CleanupRet:
7195 return false; // Misc instructions which have effects
7196 }
7197}
7198
7200 if (I.mayReadOrWriteMemory())
7201 // Memory dependency possible
7202 return true;
7204 // Can't move above a maythrow call or infinite loop. Or if an
7205 // inalloca alloca, above a stacksave call.
7206 return true;
7208 // 1) Can't reorder two inf-loop calls, even if readonly
7209 // 2) Also can't reorder an inf-loop call below an instruction which isn't
7210 // safe to speculatively execute. (Inverse of above)
7211 return true;
7212 return false;
7213}
7214
7215/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
7229
7230/// Combine constant ranges from computeConstantRange() and computeKnownBits().
7233 bool ForSigned,
7234 const SimplifyQuery &SQ) {
7235 ConstantRange CR1 =
7236 ConstantRange::fromKnownBits(V.getKnownBits(SQ), ForSigned);
7237 ConstantRange CR2 = computeConstantRange(V, ForSigned, SQ.IIQ.UseInstrInfo);
7240 return CR1.intersectWith(CR2, RangeType);
7241}
7242
7244 const Value *RHS,
7245 const SimplifyQuery &SQ,
7246 bool IsNSW) {
7247 ConstantRange LHSRange =
7248 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
7249 ConstantRange RHSRange =
7250 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
7251
7252 // mul nsw of two non-negative numbers is also nuw.
7253 if (IsNSW && LHSRange.isAllNonNegative() && RHSRange.isAllNonNegative())
7255
7256 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
7257}
7258
7260 const Value *RHS,
7261 const SimplifyQuery &SQ) {
7262 // Multiplying n * m significant bits yields a result of n + m significant
7263 // bits. If the total number of significant bits does not exceed the
7264 // result bit width (minus 1), there is no overflow.
7265 // This means if we have enough leading sign bits in the operands
7266 // we can guarantee that the result does not overflow.
7267 // Ref: "Hacker's Delight" by Henry Warren
7268 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
7269
7270 // Note that underestimating the number of sign bits gives a more
7271 // conservative answer.
7272 unsigned SignBits =
7273 ::ComputeNumSignBits(LHS, SQ) + ::ComputeNumSignBits(RHS, SQ);
7274
7275 // First handle the easy case: if we have enough sign bits there's
7276 // definitely no overflow.
7277 if (SignBits > BitWidth + 1)
7279
7280 // There are two ambiguous cases where there can be no overflow:
7281 // SignBits == BitWidth + 1 and
7282 // SignBits == BitWidth
7283 // The second case is difficult to check, therefore we only handle the
7284 // first case.
7285 if (SignBits == BitWidth + 1) {
7286 // It overflows only when both arguments are negative and the true
7287 // product is exactly the minimum negative number.
7288 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
7289 // For simplicity we just check if at least one side is not negative.
7290 KnownBits LHSKnown = computeKnownBits(LHS, SQ);
7291 KnownBits RHSKnown = computeKnownBits(RHS, SQ);
7292 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
7294 }
7296}
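
// Worked illustration (numbers chosen for exposition): for i8, if each
// operand has at least 5 sign bits it lies in [-8, 7], so SignBits >= 10 >
// BitWidth + 1 == 9 and the product lies in [-56, 64], which always fits.
// With SignBits == 9 (say ranges [-8, 7] and [-16, 15]) the only
// unrepresentable product is (-8) * (-16) == +128, which requires both
// operands to be negative -- hence the non-negativity check above.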
7297
7300 const WithCache<const Value *> &RHS,
7301 const SimplifyQuery &SQ) {
7302 ConstantRange LHSRange =
7303 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
7304 ConstantRange RHSRange =
7305 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
7306 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
7307}
7308
7309static OverflowResult
7312 const AddOperator *Add, const SimplifyQuery &SQ) {
7313 if (Add && Add->hasNoSignedWrap()) {
7315 }
7316
7317 // If LHS and RHS each have at least two sign bits, the addition will look
7318 // like
7319 //
7320 // XX..... +
7321 // YY.....
7322 //
7323 // If the carry into the most significant position is 0, X and Y can't both
7324 // be 1 and therefore the carry out of the addition is also 0.
7325 //
7326 // If the carry into the most significant position is 1, X and Y can't both
7327 // be 0 and therefore the carry out of the addition is also 1.
7328 //
7329 // Since the carry into the most significant position is always equal to
7330 // the carry out of the addition, there is no signed overflow.
7331 if (::ComputeNumSignBits(LHS, SQ) > 1 && ::ComputeNumSignBits(RHS, SQ) > 1)
7333
7334 ConstantRange LHSRange =
7335 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/true, SQ);
7336 ConstantRange RHSRange =
7337 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/true, SQ);
7338 OverflowResult OR =
7339 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
7341 return OR;
7342
7343 // The remaining code needs Add to be available. Return early if it is not.
7344 if (!Add)
7346
7347 // If the sign of Add is the same as at least one of the operands, this add
7348 // CANNOT overflow. If this can be determined from the known bits of the
7349 // operands the above signedAddMayOverflow() check will have already done so.
7350 // The only other way to improve on the known bits is from an assumption, so
7351 // call computeKnownBitsFromContext() directly.
7352 bool LHSOrRHSKnownNonNegative =
7353 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
7354 bool LHSOrRHSKnownNegative =
7355 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
7356 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
7357 KnownBits AddKnown(LHSRange.getBitWidth());
7358 computeKnownBitsFromContext(Add, AddKnown, SQ);
7359 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
7360 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
7362 }
7363
7365}
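
// Worked illustration of the two-sign-bit argument above (i8): operands with
// at least two sign bits lie in [-64, 63], so their sum lies in [-128, 126]
// and always fits in i8 -- no signed overflow is possible.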
7366
7368 const Value *RHS,
7369 const SimplifyQuery &SQ) {
7370 // X - (X % ?)
7371 // The remainder of a value can't have greater magnitude than itself,
7372 // so the subtraction can't overflow.
7373
7374 // X - (X -nuw ?)
7375 // In the minimal case, this would simplify to "?", so there's no subtract
7376 // at all. But if this analysis is used to peek through casts, for example,
7377 // then determining no-overflow may allow other transforms.
7378
7379 // TODO: There are other patterns like this.
7380 // See simplifyICmpWithBinOpOnLHS() for candidates.
7381 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
7382 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
7383 if (isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
7385
7386 if (auto C = isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, SQ.CxtI,
7387 SQ.DL)) {
7388 if (*C)
7391 }
7392
7393 ConstantRange LHSRange =
7394 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
7395 ConstantRange RHSRange =
7396 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
7397 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
7398}
7399
7401 const Value *RHS,
7402 const SimplifyQuery &SQ) {
7403 // X - (X % ?)
7404 // The remainder of a value can't have greater magnitude than itself,
7405 // so the subtraction can't overflow.
7406
7407 // X - (X -nsw ?)
7408 // In the minimal case, this would simplify to "?", so there's no subtract
7409 // at all. But if this analysis is used to peek through casts, for example,
7410 // then determining no-overflow may allow other transforms.
7411 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
7412 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
7413 if (isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
7415
7416 // If LHS and RHS each have at least two sign bits, the subtraction
7417 // cannot overflow.
7418 if (::ComputeNumSignBits(LHS, SQ) > 1 && ::ComputeNumSignBits(RHS, SQ) > 1)
7420
7421 ConstantRange LHSRange =
7422 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/true, SQ);
7423 ConstantRange RHSRange =
7424 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/true, SQ);
7425 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
7426}
7427
7429 const DominatorTree &DT) {
7430 SmallVector<const BranchInst *, 2> GuardingBranches;
7432
7433 for (const User *U : WO->users()) {
7434 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
7435 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
7436
7437 if (EVI->getIndices()[0] == 0)
7438 Results.push_back(EVI);
7439 else {
7440 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
7441
7442 for (const auto *U : EVI->users())
7443 if (const auto *B = dyn_cast<BranchInst>(U)) {
7444 assert(B->isConditional() && "How else is it using an i1?");
7445 GuardingBranches.push_back(B);
7446 }
7447 }
7448 } else {
7449 // We are using the aggregate directly in a way we don't want to analyze
7450 // here (storing it to a global, say).
7451 return false;
7452 }
7453 }
7454
7455 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
7456 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
7457 if (!NoWrapEdge.isSingleEdge())
7458 return false;
7459
7460 // Check if all users of the add are provably no-wrap.
7461 for (const auto *Result : Results) {
7462 // If the extractvalue itself is not executed on overflow, then we don't
7463 // need to check each use separately, since domination is transitive.
7464 if (DT.dominates(NoWrapEdge, Result->getParent()))
7465 continue;
7466
7467 for (const auto &RU : Result->uses())
7468 if (!DT.dominates(NoWrapEdge, RU))
7469 return false;
7470 }
7471
7472 return true;
7473 };
7474
7475 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7476}
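
// A pattern the check above recognizes (hypothetical IR, labels are
// illustrative):
//   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %cont
// cont:
//   %v  = extractvalue { i32, i1 } %s, 0
// Every use of the value result %v is dominated by the no-overflow edge, so
// the check returns true: the arithmetic is known not to wrap wherever the
// result is used.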
7477
7478/// Shifts return poison if the shift amount is equal to or larger than the bitwidth.
7479static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
7480 auto *C = dyn_cast<Constant>(ShiftAmount);
7481 if (!C)
7482 return false;
7483
7484 // Shifts return poison if the shift amount is equal to or larger than the bitwidth.
7486 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
7487 unsigned NumElts = FVTy->getNumElements();
7488 for (unsigned i = 0; i < NumElts; ++i)
7489 ShiftAmounts.push_back(C->getAggregateElement(i));
7490 } else if (isa<ScalableVectorType>(C->getType()))
7491 return false; // Can't tell, just return false to be safe
7492 else
7493 ShiftAmounts.push_back(C);
7494
7495 bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
7496 auto *CI = dyn_cast_or_null<ConstantInt>(C);
7497 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
7498 });
7499
7500 return Safe;
7501}
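
// Worked illustration (hypothetical IR): for `shl <2 x i8> %x, <i8 3, i8 7>`
// every shift amount is less than 8, so the helper above returns true; for
// `<i8 3, i8 9>` the second lane is out of range and it returns false.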
7502
7508
7510 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
7511}
7512
7514 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
7515}
7516
7518 bool ConsiderFlagsAndMetadata) {
7519
7520 if (ConsiderFlagsAndMetadata && includesPoison(Kind) &&
7521 Op->hasPoisonGeneratingAnnotations())
7522 return true;
7523
7524 unsigned Opcode = Op->getOpcode();
7525
7526 // Check whether opcode is a poison/undef-generating operation
7527 switch (Opcode) {
7528 case Instruction::Shl:
7529 case Instruction::AShr:
7530 case Instruction::LShr:
7531 return includesPoison(Kind) && !shiftAmountKnownInRange(Op->getOperand(1));
7532 case Instruction::FPToSI:
7533 case Instruction::FPToUI:
7534 // fptosi/ui yields poison if the resulting value does not fit in the
7535 // destination type.
7536 return true;
7537 case Instruction::Call:
7538 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
7539 switch (II->getIntrinsicID()) {
7540 // TODO: Add more intrinsics.
7541 case Intrinsic::ctlz:
7542 case Intrinsic::cttz:
7543 case Intrinsic::abs:
7544 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
7545 return false;
7546 break;
7547 case Intrinsic::sshl_sat:
7548 case Intrinsic::ushl_sat:
7549 if (!includesPoison(Kind) ||
7550 shiftAmountKnownInRange(II->getArgOperand(1)))
7551 return false;
7552 break;
7553 }
7554 }
7555 [[fallthrough]];
7556 case Instruction::CallBr:
7557 case Instruction::Invoke: {
7558 const auto *CB = cast<CallBase>(Op);
7559 return !CB->hasRetAttr(Attribute::NoUndef) &&
7560 !CB->hasFnAttr(Attribute::NoCreateUndefOrPoison);
7561 }
7562 case Instruction::InsertElement:
7563 case Instruction::ExtractElement: {
7564 // If index exceeds the length of the vector, it returns poison
7565 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
7566 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7567 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
7568 if (includesPoison(Kind))
7569 return !Idx ||
7570 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7571 return false;
7572 }
7573 case Instruction::ShuffleVector: {
7575 ? cast<ConstantExpr>(Op)->getShuffleMask()
7576 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
7577 return includesPoison(Kind) && is_contained(Mask, PoisonMaskElem);
7578 }
7579 case Instruction::FNeg:
7580 case Instruction::PHI:
7581 case Instruction::Select:
7582 case Instruction::ExtractValue:
7583 case Instruction::InsertValue:
7584 case Instruction::Freeze:
7585 case Instruction::ICmp:
7586 case Instruction::FCmp:
7587 case Instruction::GetElementPtr:
7588 return false;
7589 case Instruction::AddrSpaceCast:
7590 return true;
7591 default: {
7592 const auto *CE = dyn_cast<ConstantExpr>(Op);
7593 if (isa<CastInst>(Op) || (CE && CE->isCast()))
7594 return false;
7595 else if (Instruction::isBinaryOp(Opcode))
7596 return false;
7597 // Be conservative and return true.
7598 return true;
7599 }
7600 }
7601}
7602
7604 bool ConsiderFlagsAndMetadata) {
7605 return ::canCreateUndefOrPoison(Op, UndefPoisonKind::UndefOrPoison,
7606 ConsiderFlagsAndMetadata);
7607}
7608
7609bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
7610 return ::canCreateUndefOrPoison(Op, UndefPoisonKind::PoisonOnly,
7611 ConsiderFlagsAndMetadata);
7612}
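
// Worked illustration (hypothetical IR): `add nsw i32 %x, 1` can create
// poison because of its nsw flag (a poison-generating annotation), whereas a
// plain `add i32 %x, 1` cannot; `shl i32 %x, %n` can create poison unless %n
// is a constant known to be less than 32.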
7613
7614static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V,
7615 unsigned Depth) {
7616 if (ValAssumedPoison == V)
7617 return true;
7618
7619 const unsigned MaxDepth = 2;
7620 if (Depth >= MaxDepth)
7621 return false;
7622
7623 if (const auto *I = dyn_cast<Instruction>(V)) {
7624 if (any_of(I->operands(), [=](const Use &Op) {
7625 return propagatesPoison(Op) &&
7626 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7627 }))
7628 return true;
7629
7630 // V = extractvalue V0, idx
7631 // V2 = extractvalue V0, idx2
7632 // V0's elements are all poison or not. (e.g., add_with_overflow)
7633 const WithOverflowInst *II;
7635 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
7636 llvm::is_contained(II->args(), ValAssumedPoison)))
7637 return true;
7638 }
7639 return false;
7640}
7641
7642static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
7643 unsigned Depth) {
7644 if (isGuaranteedNotToBePoison(ValAssumedPoison))
7645 return true;
7646
7647 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
7648 return true;
7649
7650 const unsigned MaxDepth = 2;
7651 if (Depth >= MaxDepth)
7652 return false;
7653
7654 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
7655 if (I && !canCreatePoison(cast<Operator>(I))) {
7656 return all_of(I->operands(), [=](const Value *Op) {
7657 return impliesPoison(Op, V, Depth + 1);
7658 });
7659 }
7660 return false;
7661}
7662
7663bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
7664 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
7665}
7666
7667static bool programUndefinedIfUndefOrPoison(const Value *V, bool PoisonOnly);
7668
7670 const Value *V, AssumptionCache *AC, const Instruction *CtxI,
7671 const DominatorTree *DT, unsigned Depth, UndefPoisonKind Kind) {
7673 return false;
7674
7675 if (isa<MetadataAsValue>(V))
7676 return false;
7677
7678 if (const auto *A = dyn_cast<Argument>(V)) {
7679 if (A->hasAttribute(Attribute::NoUndef) ||
7680 A->hasAttribute(Attribute::Dereferenceable) ||
7681 A->hasAttribute(Attribute::DereferenceableOrNull))
7682 return true;
7683 }
7684
7685 if (auto *C = dyn_cast<Constant>(V)) {
7686 if (isa<PoisonValue>(C))
7687 return !includesPoison(Kind);
7688
7689 if (isa<UndefValue>(C))
7690 return !includesUndef(Kind);
7691
7694 return true;
7695
7696 if (C->getType()->isVectorTy()) {
7697 if (isa<ConstantExpr>(C)) {
7698 // Scalable vectors can use a ConstantExpr to build a splat.
7699 if (Constant *SplatC = C->getSplatValue())
7700 if (isa<ConstantInt>(SplatC) || isa<ConstantFP>(SplatC))
7701 return true;
7702 } else {
7703 if (includesUndef(Kind) && C->containsUndefElement())
7704 return false;
7705 if (includesPoison(Kind) && C->containsPoisonElement())
7706 return false;
7707 return !C->containsConstantExpression();
7708 }
7709 }
7710 }
7711
7712 // Strip cast operations from a pointer value.
7713 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
7714 // inbounds with zero offset. To guarantee that the result isn't poison, the
7715 // stripped pointer is checked: it has to point into an allocated object or
7716 // be null, so that `inbounds` getelementptrs with a zero offset cannot
7717 // produce poison.
7718 // It can also strip off addrspacecasts that do not change the bit
7719 // representation; we consider such an addrspacecast equivalent to a no-op.
7720 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7721 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
7722 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
7723 return true;
7724
7725 auto OpCheck = [&](const Value *V) {
7726 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, Kind);
7727 };
7728
7729 if (auto *Opr = dyn_cast<Operator>(V)) {
7730 // If the value is a freeze instruction, then it can never
7731 // be undef or poison.
7732 if (isa<FreezeInst>(V))
7733 return true;
7734
7735 if (const auto *CB = dyn_cast<CallBase>(V)) {
7736 if (CB->hasRetAttr(Attribute::NoUndef) ||
7737 CB->hasRetAttr(Attribute::Dereferenceable) ||
7738 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7739 return true;
7740 }
7741
7742 if (!::canCreateUndefOrPoison(Opr, Kind,
7743 /*ConsiderFlagsAndMetadata=*/true)) {
7744 if (const auto *PN = dyn_cast<PHINode>(V)) {
7745 unsigned Num = PN->getNumIncomingValues();
7746 bool IsWellDefined = true;
7747 for (unsigned i = 0; i < Num; ++i) {
7748 if (PN == PN->getIncomingValue(i))
7749 continue;
7750 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7751 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
7752 DT, Depth + 1, Kind)) {
7753 IsWellDefined = false;
7754 break;
7755 }
7756 }
7757 if (IsWellDefined)
7758 return true;
7759 } else if (auto *Splat = isa<ShuffleVectorInst>(Opr) ? getSplatValue(Opr)
7760 : nullptr) {
7761 // For splats we only need to check the value being splatted.
7762 if (OpCheck(Splat))
7763 return true;
7764 } else if (all_of(Opr->operands(), OpCheck))
7765 return true;
7766 }
7767 }
7768
7769 if (auto *I = dyn_cast<LoadInst>(V))
7770 if (I->hasMetadata(LLVMContext::MD_noundef) ||
7771 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7772 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7773 return true;
7774
7776 return true;
7777
7778 // CxtI may be null or a cloned instruction.
7779 if (!CtxI || !CtxI->getParent() || !DT)
7780 return false;
7781
7782 auto *DNode = DT->getNode(CtxI->getParent());
7783 if (!DNode)
7784 // Unreachable block
7785 return false;
7786
7787 // If V is used as a branch condition before reaching CtxI, V cannot be
7788 // undef or poison.
7789 // br V, BB1, BB2
7790 // BB1:
7791 // CtxI ; V cannot be undef or poison here
7792 auto *Dominator = DNode->getIDom();
7793 // This check is purely for compile time reasons: we can skip the IDom walk
7794 // if what we are checking for includes undef and the value is not an integer.
7795 if (!includesUndef(Kind) || V->getType()->isIntegerTy())
7796 while (Dominator) {
7797 auto *TI = Dominator->getBlock()->getTerminator();
7798
7799 Value *Cond = nullptr;
7800 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
7801 if (BI->isConditional())
7802 Cond = BI->getCondition();
7803 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
7804 Cond = SI->getCondition();
7805 }
7806
7807 if (Cond) {
7808 if (Cond == V)
7809 return true;
7810 else if (!includesUndef(Kind) && isa<Operator>(Cond)) {
7811 // For poison, we can analyze further
7812 auto *Opr = cast<Operator>(Cond);
7813 if (any_of(Opr->operands(), [V](const Use &U) {
7814 return V == U && propagatesPoison(U);
7815 }))
7816 return true;
7817 }
7818 }
7819
7820 Dominator = Dominator->getIDom();
7821 }
7822
7823 if (AC && getKnowledgeValidInContext(V, {Attribute::NoUndef}, *AC, CtxI, DT))
7824 return true;
7825
7826 return false;
7827}
7828
7830 const Instruction *CtxI,
7831 const DominatorTree *DT,
7832 unsigned Depth) {
7833 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
7835}
7836
7838 const Instruction *CtxI,
7839 const DominatorTree *DT, unsigned Depth) {
7840 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
7842}
7843
7845 const Instruction *CtxI,
7846 const DominatorTree *DT, unsigned Depth) {
7847 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
7849}
7850
7851/// Return true if undefined behavior would provably be executed on the path to
7852/// OnPathTo if Root produced a poison result. Note that this doesn't say
7853/// anything about whether OnPathTo is actually executed or whether Root is
7854/// actually poison. This can be used to assess whether a new use of Root can
7855/// be added at a location which is control equivalent with OnPathTo (such as
7856/// immediately before it) without introducing UB which didn't previously
7857/// exist. Note that a false result conveys no information.
7859 Instruction *OnPathTo,
7860 DominatorTree *DT) {
7861 // Basic approach is to assume Root is poison, propagate poison forward
7862 // through all users we can easily track, and then check whether any of those
7863 // users are provably UB and must execute before our exiting block might
7864 // exit.
7865
7866 // The set of all recursive users we've visited (which are assumed to all be
7867 // poison because of said visit)
7870 Worklist.push_back(Root);
7871 while (!Worklist.empty()) {
7872 const Instruction *I = Worklist.pop_back_val();
7873
7874 // If we know this must trigger UB on a path leading our target.
7875 if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
7876 return true;
7877
7878 // If we can't analyze propagation through this instruction, just skip it
7879 // and transitive users. Safe as false is a conservative result.
7880 if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
7881 return KnownPoison.contains(U) && propagatesPoison(U);
7882 }))
7883 continue;
7884
7885 if (KnownPoison.insert(I).second)
7886 for (const User *User : I->users())
7887 Worklist.push_back(cast<Instruction>(User));
7888 }
7889
7890 // Might be non-UB, or might have a path we couldn't prove must execute on
7891 // way to exiting bb.
7892 return false;
7893}
7894
7896 const SimplifyQuery &SQ) {
7897 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
7898 Add, SQ);
7899}
7900
7903 const WithCache<const Value *> &RHS,
7904 const SimplifyQuery &SQ) {
7905 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, SQ);
7906}
7907
7909 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
7910 // of time because it's possible for another thread to interfere with it for an
7911 // arbitrary length of time, but programs aren't allowed to rely on that.
7912
7913 // If there is no successor, then execution can't transfer to it.
7914 if (isa<ReturnInst>(I))
7915 return false;
7917 return false;
7918
7919 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
7920 // Instruction::willReturn.
7921 //
7922 // FIXME: Move this check into Instruction::willReturn.
7923 if (isa<CatchPadInst>(I)) {
7924 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
7925 default:
7926 // A catchpad may invoke exception object constructors and such, which
7927 // in some languages can be arbitrary code, so be conservative by default.
7928 return false;
7930 // For CoreCLR, it just involves a type test.
7931 return true;
7932 }
7933 }
7934
7935 // An instruction that returns without throwing must transfer control flow
7936 // to a successor.
7937 return !I->mayThrow() && I->willReturn();
7938}
7939
7941 // TODO: This is slightly conservative for invoke instructions since exiting
7942 // via an exception *is* normal control flow for them.
7943 for (const Instruction &I : *BB)
7945 return false;
7946 return true;
7947}
7948
7955
7958 assert(ScanLimit && "scan limit must be non-zero");
7959 for (const Instruction &I : Range) {
7960 if (--ScanLimit == 0)
7961 return false;
7963 return false;
7964 }
7965 return true;
7966}
7967
7969 const Loop *L) {
7970 // The loop header is guaranteed to be executed for every iteration.
7971 //
7972 // FIXME: Relax this constraint to cover all basic blocks that are
7973 // guaranteed to be executed at every iteration.
7974 if (I->getParent() != L->getHeader()) return false;
7975
7976 for (const Instruction &LI : *L->getHeader()) {
7977 if (&LI == I) return true;
7978 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
7979 }
7980 llvm_unreachable("Instruction not contained in its own parent basic block.");
7981}
7982
7984 switch (IID) {
7985 // TODO: Add more intrinsics.
7986 case Intrinsic::sadd_with_overflow:
7987 case Intrinsic::ssub_with_overflow:
7988 case Intrinsic::smul_with_overflow:
7989 case Intrinsic::uadd_with_overflow:
7990 case Intrinsic::usub_with_overflow:
7991 case Intrinsic::umul_with_overflow:
7992 // If an input is a vector containing a poison element, the
7993 // two output vectors (calculated results, overflow bits)'
7994 // corresponding lanes are poison.
7995 return true;
7996 case Intrinsic::ctpop:
7997 case Intrinsic::ctlz:
7998 case Intrinsic::cttz:
7999 case Intrinsic::abs:
8000 case Intrinsic::smax:
8001 case Intrinsic::smin:
8002 case Intrinsic::umax:
8003 case Intrinsic::umin:
8004 case Intrinsic::scmp:
8005 case Intrinsic::is_fpclass:
8006 case Intrinsic::ptrmask:
8007 case Intrinsic::ucmp:
8008 case Intrinsic::bitreverse:
8009 case Intrinsic::bswap:
8010 case Intrinsic::sadd_sat:
8011 case Intrinsic::ssub_sat:
8012 case Intrinsic::sshl_sat:
8013 case Intrinsic::uadd_sat:
8014 case Intrinsic::usub_sat:
8015 case Intrinsic::ushl_sat:
8016 case Intrinsic::smul_fix:
8017 case Intrinsic::smul_fix_sat:
8018 case Intrinsic::umul_fix:
8019 case Intrinsic::umul_fix_sat:
8020 case Intrinsic::pow:
8021 case Intrinsic::powi:
8022 case Intrinsic::sin:
8023 case Intrinsic::sinh:
8024 case Intrinsic::cos:
8025 case Intrinsic::cosh:
8026 case Intrinsic::sincos:
8027 case Intrinsic::sincospi:
8028 case Intrinsic::tan:
8029 case Intrinsic::tanh:
8030 case Intrinsic::asin:
8031 case Intrinsic::acos:
8032 case Intrinsic::atan:
8033 case Intrinsic::atan2:
8034 case Intrinsic::canonicalize:
8035 case Intrinsic::sqrt:
8036 case Intrinsic::exp:
8037 case Intrinsic::exp2:
8038 case Intrinsic::exp10:
8039 case Intrinsic::log:
8040 case Intrinsic::log2:
8041 case Intrinsic::log10:
8042 case Intrinsic::modf:
8043 case Intrinsic::floor:
8044 case Intrinsic::ceil:
8045 case Intrinsic::trunc:
8046 case Intrinsic::rint:
8047 case Intrinsic::nearbyint:
8048 case Intrinsic::round:
8049 case Intrinsic::roundeven:
8050 case Intrinsic::lrint:
8051 case Intrinsic::llrint:
8052 case Intrinsic::fshl:
8053 case Intrinsic::fshr:
8054 return true;
8055 default:
8056 return false;
8057 }
8058}
8059
8060bool llvm::propagatesPoison(const Use &PoisonOp) {
8061 const Operator *I = cast<Operator>(PoisonOp.getUser());
8062 switch (I->getOpcode()) {
8063 case Instruction::Freeze:
8064 case Instruction::PHI:
8065 case Instruction::Invoke:
8066 return false;
8067 case Instruction::Select:
8068 return PoisonOp.getOperandNo() == 0;
8069 case Instruction::Call:
8070 if (auto *II = dyn_cast<IntrinsicInst>(I))
8071 return intrinsicPropagatesPoison(II->getIntrinsicID());
8072 return false;
8073 case Instruction::ICmp:
8074 case Instruction::FCmp:
8075 case Instruction::GetElementPtr:
8076 return true;
8077 default:
8079 return true;
8080
8081 // Be conservative and return false.
8082 return false;
8083 }
8084}
8085
8086/// Enumerates all operands of \p I that are guaranteed to not be undef or
8087/// poison. If the callback \p Handle returns true, stop processing and return
8088/// true. Otherwise, return false.
8089template <typename CallableT>
8091 const CallableT &Handle) {
8092 switch (I->getOpcode()) {
8093 case Instruction::Store:
8094 if (Handle(cast<StoreInst>(I)->getPointerOperand()))
8095 return true;
8096 break;
8097
8098 case Instruction::Load:
8099 if (Handle(cast<LoadInst>(I)->getPointerOperand()))
8100 return true;
8101 break;
8102
8103 // Since the dereferenceable attribute implies noundef, atomic operations
8104 // also implicitly have noundef pointers.
8105 case Instruction::AtomicCmpXchg:
8107 return true;
8108 break;
8109
8110 case Instruction::AtomicRMW:
8111 if (Handle(cast<AtomicRMWInst>(I)->getPointerOperand()))
8112 return true;
8113 break;
8114
8115 case Instruction::Call:
8116 case Instruction::Invoke: {
8117 const CallBase *CB = cast<CallBase>(I);
8118 if (CB->isIndirectCall() && Handle(CB->getCalledOperand()))
8119 return true;
8120 for (unsigned i = 0; i < CB->arg_size(); ++i)
8121 if ((CB->paramHasAttr(i, Attribute::NoUndef) ||
8122 CB->paramHasAttr(i, Attribute::Dereferenceable) ||
8123 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
8124 Handle(CB->getArgOperand(i)))
8125 return true;
8126 break;
8127 }
8128 case Instruction::Ret:
8129 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
8130 Handle(I->getOperand(0)))
8131 return true;
8132 break;
8133 case Instruction::Switch:
8134 if (Handle(cast<SwitchInst>(I)->getCondition()))
8135 return true;
8136 break;
8137 case Instruction::Br: {
8138 auto *BR = cast<BranchInst>(I);
8139 if (BR->isConditional() && Handle(BR->getCondition()))
8140 return true;
8141 break;
8142 }
8143 default:
8144 break;
8145 }
8146
8147 return false;
8148}
8149
8150/// Enumerates all operands of \p I that are guaranteed to not be poison.
8151template <typename CallableT>
8153 const CallableT &Handle) {
8154 if (handleGuaranteedWellDefinedOps(I, Handle))
8155 return true;
8156 switch (I->getOpcode()) {
8157 // Divisors of these operations are allowed to be partially undef.
8158 case Instruction::UDiv:
8159 case Instruction::SDiv:
8160 case Instruction::URem:
8161 case Instruction::SRem:
8162 return Handle(I->getOperand(1));
8163 default:
8164 return false;
8165 }
8166}
8167
8169 const SmallPtrSetImpl<const Value *> &KnownPoison) {
8171 I, [&](const Value *V) { return KnownPoison.count(V); });
8172}
8173
8175 bool PoisonOnly) {
8176 // We currently only look for uses of values within the same basic
8177 // block, as that makes it easier to guarantee that the uses will be
8178 // executed given that Inst is executed.
8179 //
8180 // FIXME: Expand this to consider uses beyond the same basic block. To do
8181 // this, look out for the distinction between post-dominance and strong
8182 // post-dominance.
8183 const BasicBlock *BB = nullptr;
8185 if (const auto *Inst = dyn_cast<Instruction>(V)) {
8186 BB = Inst->getParent();
8187 Begin = Inst->getIterator();
8188 Begin++;
8189 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
8190 if (Arg->getParent()->isDeclaration())
8191 return false;
8192 BB = &Arg->getParent()->getEntryBlock();
8193 Begin = BB->begin();
8194 } else {
8195 return false;
8196 }
8197
8198 // Limit number of instructions we look at, to avoid scanning through large
8199 // blocks. The current limit is chosen arbitrarily.
8200 unsigned ScanLimit = 32;
8201 BasicBlock::const_iterator End = BB->end();
8202
8203 if (!PoisonOnly) {
8204 // Since undef does not propagate eagerly, be conservative & just check
8205 // whether a value is directly passed to an instruction that must take
8206 // well-defined operands.
8207
8208 for (const auto &I : make_range(Begin, End)) {
8209 if (--ScanLimit == 0)
8210 break;
8211
8212 if (handleGuaranteedWellDefinedOps(&I, [V](const Value *WellDefinedOp) {
8213 return WellDefinedOp == V;
8214 }))
8215 return true;
8216
8218 break;
8219 }
8220 return false;
8221 }
8222
8223 // Set of instructions that we have proved will yield poison if Inst
8224 // does.
8225 SmallPtrSet<const Value *, 16> YieldsPoison;
8227
8228 YieldsPoison.insert(V);
8229 Visited.insert(BB);
8230
8231 while (true) {
8232 for (const auto &I : make_range(Begin, End)) {
8233 if (--ScanLimit == 0)
8234 return false;
8235 if (mustTriggerUB(&I, YieldsPoison))
8236 return true;
8238 return false;
8239
8240 // If an operand is poison and propagates it, mark I as yielding poison.
8241 for (const Use &Op : I.operands()) {
8242 if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
8243 YieldsPoison.insert(&I);
8244 break;
8245 }
8246 }
8247
8248 // Special handling for select, which returns poison if its operand 0 is
8249 // poison (handled in the loop above) *or* if both its true/false operands
8250 // are poison (handled here).
8251 if (I.getOpcode() == Instruction::Select &&
8252 YieldsPoison.count(I.getOperand(1)) &&
8253 YieldsPoison.count(I.getOperand(2))) {
8254 YieldsPoison.insert(&I);
8255 }
8256 }
8257
8258 BB = BB->getSingleSuccessor();
8259 if (!BB || !Visited.insert(BB).second)
8260 break;
8261
8262 Begin = BB->getFirstNonPHIIt();
8263 End = BB->end();
8264 }
8265 return false;
8266}
8267
8269 return ::programUndefinedIfUndefOrPoison(Inst, false);
8270}
8271
8273 return ::programUndefinedIfUndefOrPoison(Inst, true);
8274}
8275
8276static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
8277 if (FMF.noNaNs())
8278 return true;
8279
8280 if (auto *C = dyn_cast<ConstantFP>(V))
8281 return !C->isNaN();
8282
8283 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
8284 if (!C->getElementType()->isFloatingPointTy())
8285 return false;
8286 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8287 if (C->getElementAsAPFloat(I).isNaN())
8288 return false;
8289 }
8290 return true;
8291 }
8292
8294 return true;
8295
8296 return false;
8297}
8298
8299static bool isKnownNonZero(const Value *V) {
8300 if (auto *C = dyn_cast<ConstantFP>(V))
8301 return !C->isZero();
8302
8303 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
8304 if (!C->getElementType()->isFloatingPointTy())
8305 return false;
8306 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8307 if (C->getElementAsAPFloat(I).isZero())
8308 return false;
8309 }
8310 return true;
8311 }
8312
8313 return false;
8314}
8315
8316/// Match the clamp pattern for float types without caring about NaNs or signed zeros.
8317/// Given a non-min/max outer cmp/select from the clamp pattern, this
8318/// function recognizes whether it can be substituted by a "canonical" min/max
8319/// pattern.
8321 Value *CmpLHS, Value *CmpRHS,
8322 Value *TrueVal, Value *FalseVal,
8323 Value *&LHS, Value *&RHS) {
8324 // Try to match
8325 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
8326 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
8327 // and return description of the outer Max/Min.
8328
8329 // First, check if select has inverse order:
8330 if (CmpRHS == FalseVal) {
8331 std::swap(TrueVal, FalseVal);
8332 Pred = CmpInst::getInversePredicate(Pred);
8333 }
8334
8335 // Assume success now. If there's no match, callers should not use these anyway.
8336 LHS = TrueVal;
8337 RHS = FalseVal;
8338
8339 const APFloat *FC1;
8340 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
8341 return {SPF_UNKNOWN, SPNB_NA, false};
8342
8343 const APFloat *FC2;
8344 switch (Pred) {
8345 case CmpInst::FCMP_OLT:
8346 case CmpInst::FCMP_OLE:
8347 case CmpInst::FCMP_ULT:
8348 case CmpInst::FCMP_ULE:
8349 if (match(FalseVal, m_OrdOrUnordFMin(m_Specific(CmpLHS), m_APFloat(FC2))) &&
8350 *FC1 < *FC2)
8351 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
8352 break;
8353 case CmpInst::FCMP_OGT:
8354 case CmpInst::FCMP_OGE:
8355 case CmpInst::FCMP_UGT:
8356 case CmpInst::FCMP_UGE:
8357 if (match(FalseVal, m_OrdOrUnordFMax(m_Specific(CmpLHS), m_APFloat(FC2))) &&
8358 *FC1 > *FC2)
8359 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
8360 break;
8361 default:
8362 break;
8363 }
8364
8365 return {SPF_UNKNOWN, SPNB_NA, false};
8366}
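
// Worked illustration (hypothetical IR): the matcher above turns
//   %inner = fcmp olt float %x, 4.0
//   %min   = select i1 %inner, float %x, float 4.0      ; fmin(%x, 4.0)
//   %outer = fcmp olt float %x, 1.0
//   %sel   = select i1 %outer, float 1.0, float %min
// into an SPF_FMAXNUM description: the whole select clamps %x to [1.0, 4.0]
// when NaNs and signed zeros are not a concern.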
8367
8368/// Recognize variations of:
8369/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
8371 Value *CmpLHS, Value *CmpRHS,
8372 Value *TrueVal, Value *FalseVal) {
8373 // Swap the select operands and predicate to match the patterns below.
8374 if (CmpRHS != TrueVal) {
8375 Pred = ICmpInst::getSwappedPredicate(Pred);
8376 std::swap(TrueVal, FalseVal);
8377 }
8378 const APInt *C1;
8379 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
8380 const APInt *C2;
8381 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
8382 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
8383 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
8384 return {SPF_SMAX, SPNB_NA, false};
8385
8386 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
8387 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
8388 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
8389 return {SPF_SMIN, SPNB_NA, false};
8390
8391 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
8392 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
8393 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
8394 return {SPF_UMAX, SPNB_NA, false};
8395
8396 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
8397 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
8398 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
8399 return {SPF_UMIN, SPNB_NA, false};
8400 }
8401 return {SPF_UNKNOWN, SPNB_NA, false};
8402}
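// Illustrative example: with C1 = 10 and C2 = 100,
//   (x <s 10) ? 10 : smin(x, 100)
// is reported by matchClamp as SPF_SMAX, i.e. smax(smin(x, 100), 10), which is
// the lower-bound half of clamp(x, 10, 100).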
8403
8404/// Recognize variations of:
8405/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
8406static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
8407 Value *CmpLHS, Value *CmpRHS,
8408 Value *TVal, Value *FVal,
8409 unsigned Depth) {
8410 // TODO: Allow FP min/max with nnan/nsz.
8411 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
8412
8413 Value *A = nullptr, *B = nullptr;
8414 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
8415 if (!SelectPatternResult::isMinOrMax(L.Flavor))
8416 return {SPF_UNKNOWN, SPNB_NA, false};
8417
8418 Value *C = nullptr, *D = nullptr;
8419 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
8420 if (L.Flavor != R.Flavor)
8421 return {SPF_UNKNOWN, SPNB_NA, false};
8422
8423 // We have something like: x Pred y ? min(a, b) : min(c, d).
8424 // Try to match the compare to the min/max operations of the select operands.
8425 // First, make sure we have the right compare predicate.
8426 switch (L.Flavor) {
8427 case SPF_SMIN:
8428 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
8429 Pred = ICmpInst::getSwappedPredicate(Pred);
8430 std::swap(CmpLHS, CmpRHS);
8431 }
8432 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
8433 break;
8434 return {SPF_UNKNOWN, SPNB_NA, false};
8435 case SPF_SMAX:
8436 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
8437 Pred = ICmpInst::getSwappedPredicate(Pred);
8438 std::swap(CmpLHS, CmpRHS);
8439 }
8440 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
8441 break;
8442 return {SPF_UNKNOWN, SPNB_NA, false};
8443 case SPF_UMIN:
8444 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
8445 Pred = ICmpInst::getSwappedPredicate(Pred);
8446 std::swap(CmpLHS, CmpRHS);
8447 }
8448 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
8449 break;
8450 return {SPF_UNKNOWN, SPNB_NA, false};
8451 case SPF_UMAX:
8452 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
8453 Pred = ICmpInst::getSwappedPredicate(Pred);
8454 std::swap(CmpLHS, CmpRHS);
8455 }
8456 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
8457 break;
8458 return {SPF_UNKNOWN, SPNB_NA, false};
8459 default:
8460 return {SPF_UNKNOWN, SPNB_NA, false};
8461 }
8462
8463 // If there is a common operand in the already matched min/max and the other
8464 // min/max operands match the compare operands (either directly or inverted),
8465 // then this is min/max of the same flavor.
8466
8467 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
8468 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
8469 if (D == B) {
8470 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
8471 match(A, m_Not(m_Specific(CmpRHS)))))
8472 return {L.Flavor, SPNB_NA, false};
8473 }
8474 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
8475 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
8476 if (C == B) {
8477 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
8478 match(A, m_Not(m_Specific(CmpRHS)))))
8479 return {L.Flavor, SPNB_NA, false};
8480 }
8481 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
8482 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
8483 if (D == A) {
8484 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
8485 match(B, m_Not(m_Specific(CmpRHS)))))
8486 return {L.Flavor, SPNB_NA, false};
8487 }
8488 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
8489 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
8490 if (C == A) {
8491 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
8492 match(B, m_Not(m_Specific(CmpRHS)))))
8493 return {L.Flavor, SPNB_NA, false};
8494 }
8495
8496 return {SPF_UNKNOWN, SPNB_NA, false};
8497}
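// Illustrative example: for
//   select (icmp slt a, c), smin(a, b), smin(c, b)
// both select arms are SPF_SMIN with the common operand b, and the compare
// operands a and c match the remaining min operands, so the whole select is
// reported as SPF_SMIN; the selected value always equals
// smin(smin(a, b), smin(c, b)).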
8498
8499/// If the input value is the result of a 'not' op, constant integer, or vector
8500/// splat of a constant integer, return the bitwise-not source value.
8501/// TODO: This could be extended to handle non-splat vector integer constants.
8502static Value *getNotValue(Value *V) {
8503 Value *NotV;
8504 if (match(V, m_Not(m_Value(NotV))))
8505 return NotV;
8506
8507 const APInt *C;
8508 if (match(V, m_APInt(C)))
8509 return ConstantInt::get(V->getType(), ~(*C));
8510
8511 return nullptr;
8512}
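// For illustration: getNotValue(xor %x, -1) returns %x, and for a constant
// such as i8 5 it returns the bitwise-not constant i8 -6 (~5).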
8513
8514/// Match non-obvious integer minimum and maximum sequences.
8515static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
8516 Value *CmpLHS, Value *CmpRHS,
8517 Value *TrueVal, Value *FalseVal,
8518 Value *&LHS, Value *&RHS,
8519 unsigned Depth) {
8520 // Assume success. If there's no match, callers should not use these anyway.
8521 LHS = TrueVal;
8522 RHS = FalseVal;
8523
8524 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
8525 if (SPR.Flavor != SPF_UNKNOWN)
8526 return SPR;
8527
8528 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
8529 if (SPR.Flavor != SPF_UNKNOWN)
8530 return SPR;
8531
8532 // Look through 'not' ops to find disguised min/max.
8533 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
8534 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
8535 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
8536 switch (Pred) {
8537 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
8538 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
8539 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
8540 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
8541 default: break;
8542 }
8543 }
8544
8545 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
8546 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
8547 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
8548 switch (Pred) {
8549 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
8550 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
8551 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
8552 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
8553 default: break;
8554 }
8555 }
8556
8557 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
8558 return {SPF_UNKNOWN, SPNB_NA, false};
8559
8560 const APInt *C1;
8561 if (!match(CmpRHS, m_APInt(C1)))
8562 return {SPF_UNKNOWN, SPNB_NA, false};
8563
8564 // An unsigned min/max can be written with a signed compare.
8565 const APInt *C2;
8566 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
8567 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
8568 // Is the sign bit set?
8569 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
8570 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
8571 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
8572 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
8573
8574 // Is the sign bit clear?
8575 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
8576 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
8577 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
8578 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
8579 }
8580
8581 return {SPF_UNKNOWN, SPNB_NA, false};
8582}
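// Illustrative example (i8): (x <s 0) ? x : 127 selects x exactly when the
// sign bit is set, i.e. when x u> 127, so it is reported as SPF_UMAX
// (umax(x, 127)) even though the compare is signed.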
8583
8584bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW,
8585 bool AllowPoison) {
8586 assert(X && Y && "Invalid operand");
8587
8588 auto IsNegationOf = [&](const Value *X, const Value *Y) {
8589 if (!match(X, m_Neg(m_Specific(Y))))
8590 return false;
8591
8592 auto *BO = cast<BinaryOperator>(X);
8593 if (NeedNSW && !BO->hasNoSignedWrap())
8594 return false;
8595
8596 auto *Zero = cast<Constant>(BO->getOperand(0));
8597 if (!AllowPoison && !Zero->isNullValue())
8598 return false;
8599
8600 return true;
8601 };
8602
8603 // X = -Y or Y = -X
8604 if (IsNegationOf(X, Y) || IsNegationOf(Y, X))
8605 return true;
8606
8607 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
8608 Value *A, *B;
8609 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
8610 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
8611 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
8612 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
8613}
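// For illustration: isKnownNegation returns true for X = sub i32 0, %y with
// Y = %y, and for X = sub i32 %a, %b with Y = sub i32 %b, %a (when NeedNSW is
// false).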
8614
8615bool llvm::isKnownInversion(const Value *X, const Value *Y) {
8616 // Handle X = icmp pred A, B, Y = icmp pred A, C.
8617 Value *A, *B, *C;
8618 CmpPredicate Pred1, Pred2;
8619 if (!match(X, m_ICmp(Pred1, m_Value(A), m_Value(B))) ||
8620 !match(Y, m_c_ICmp(Pred2, m_Specific(A), m_Value(C))))
8621 return false;
8622
8623 // Both predicates must have the samesign flag, or neither.
8624 if (Pred1.hasSameSign() != Pred2.hasSameSign())
8625 return false;
8626
8627 if (B == C)
8628 return Pred1 == ICmpInst::getInversePredicate(Pred2);
8629
8630 // Try to infer the relationship from constant ranges.
8631 const APInt *RHSC1, *RHSC2;
8632 if (!match(B, m_APInt(RHSC1)) || !match(C, m_APInt(RHSC2)))
8633 return false;
8634
8635 // Sign bits of two RHSCs should match.
8636 if (Pred1.hasSameSign() && RHSC1->isNonNegative() != RHSC2->isNonNegative())
8637 return false;
8638
8639 const auto CR1 = ConstantRange::makeExactICmpRegion(Pred1, *RHSC1);
8640 const auto CR2 = ConstantRange::makeExactICmpRegion(Pred2, *RHSC2);
8641
8642 return CR1.inverse() == CR2;
8643}
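// Illustrative example: X = (icmp slt %a, 10) and Y = (icmp sgt %a, 9) are
// known inversions: makeExactICmpRegion(slt, 10) is [SINT_MIN, 10), and its
// inverse [10, SINT_MAX] equals makeExactICmpRegion(sgt, 9).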
8644
8645static SelectPatternResult getSelectPattern(CmpInst::Predicate Pred,
8646 SelectPatternNaNBehavior NaNBehavior,
8647 bool Ordered) {
8648 switch (Pred) {
8649 default:
8650 return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
8651 case ICmpInst::ICMP_UGT:
8652 case ICmpInst::ICMP_UGE:
8653 return {SPF_UMAX, SPNB_NA, false};
8654 case ICmpInst::ICMP_SGT:
8655 case ICmpInst::ICMP_SGE:
8656 return {SPF_SMAX, SPNB_NA, false};
8657 case ICmpInst::ICMP_ULT:
8658 case ICmpInst::ICMP_ULE:
8659 return {SPF_UMIN, SPNB_NA, false};
8660 case ICmpInst::ICMP_SLT:
8661 case ICmpInst::ICMP_SLE:
8662 return {SPF_SMIN, SPNB_NA, false};
8663 case FCmpInst::FCMP_UGT:
8664 case FCmpInst::FCMP_UGE:
8665 case FCmpInst::FCMP_OGT:
8666 case FCmpInst::FCMP_OGE:
8667 return {SPF_FMAXNUM, NaNBehavior, Ordered};
8668 case FCmpInst::FCMP_ULT:
8669 case FCmpInst::FCMP_ULE:
8670 case FCmpInst::FCMP_OLT:
8671 case FCmpInst::FCMP_OLE:
8672 return {SPF_FMINNUM, NaNBehavior, Ordered};
8673 }
8674}
8675
8676std::optional<std::pair<CmpPredicate, Constant *>>
8679 "Only for relational integer predicates.");
8680 if (isa<UndefValue>(C))
8681 return std::nullopt;
8682
8683 Type *Type = C->getType();
8684 bool IsSigned = ICmpInst::isSigned(Pred);
8685
8687 bool WillIncrement =
8688 UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
8689
8690 // Check if the constant operand can be safely incremented/decremented
8691 // without overflowing/underflowing.
8692 auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
8693 return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
8694 };
8695
8696 Constant *SafeReplacementConstant = nullptr;
8697 if (auto *CI = dyn_cast<ConstantInt>(C)) {
8698 // Bail out if the constant can't be safely incremented/decremented.
8699 if (!ConstantIsOk(CI))
8700 return std::nullopt;
8701 } else if (auto *FVTy = dyn_cast<FixedVectorType>(Type)) {
8702 unsigned NumElts = FVTy->getNumElements();
8703 for (unsigned i = 0; i != NumElts; ++i) {
8704 Constant *Elt = C->getAggregateElement(i);
8705 if (!Elt)
8706 return std::nullopt;
8707
8708 if (isa<UndefValue>(Elt))
8709 continue;
8710
8711 // Bail out if we can't determine if this constant is min/max or if we
8712 // know that this constant is min/max.
8713 auto *CI = dyn_cast<ConstantInt>(Elt);
8714 if (!CI || !ConstantIsOk(CI))
8715 return std::nullopt;
8716
8717 if (!SafeReplacementConstant)
8718 SafeReplacementConstant = CI;
8719 }
8720 } else if (isa<VectorType>(C->getType())) {
8721 // Handle scalable splat
8722 Value *SplatC = C->getSplatValue();
8723 auto *CI = dyn_cast_or_null<ConstantInt>(SplatC);
8724 // Bail out if the constant can't be safely incremented/decremented.
8725 if (!CI || !ConstantIsOk(CI))
8726 return std::nullopt;
8727 } else {
8728 // ConstantExpr?
8729 return std::nullopt;
8730 }
8731
8732 // It may not be safe to change a compare predicate in the presence of
8733 // undefined elements, so replace those elements with the first safe constant
8734 // that we found.
8735 // TODO: in case of poison, it is safe; let's replace undefs only.
8736 if (C->containsUndefOrPoisonElement()) {
8737 assert(SafeReplacementConstant && "Replacement constant not set");
8738 C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
8739 }
8740
8742
8743 // Increment or decrement the constant.
8744 Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
8745 Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
8746
8747 return std::make_pair(NewPred, NewC);
8748}
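// Illustrative example: for (sgt, 5) this returns (sge, 6); the strict
// predicate is relaxed and the constant incremented, which is only valid
// because 5 is not the maximum signed value.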
8749
8750static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
8751 FastMathFlags FMF,
8752 Value *CmpLHS, Value *CmpRHS,
8753 Value *TrueVal, Value *FalseVal,
8754 Value *&LHS, Value *&RHS,
8755 unsigned Depth) {
8756 bool HasMismatchedZeros = false;
8757 if (CmpInst::isFPPredicate(Pred)) {
8758 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
8759 // 0.0 operand, set the compare's 0.0 operands to that same value for the
8760 // purpose of identifying min/max. Disregard vector constants with undefined
8761 // elements because those cannot be back-propagated for analysis.
8762 Value *OutputZeroVal = nullptr;
8763 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
8764 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
8765 OutputZeroVal = TrueVal;
8766 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
8767 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
8768 OutputZeroVal = FalseVal;
8769
8770 if (OutputZeroVal) {
8771 if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
8772 HasMismatchedZeros = true;
8773 CmpLHS = OutputZeroVal;
8774 }
8775 if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
8776 HasMismatchedZeros = true;
8777 CmpRHS = OutputZeroVal;
8778 }
8779 }
8780 }
8781
8782 LHS = CmpLHS;
8783 RHS = CmpRHS;
8784
8785 // Signed zero may return inconsistent results between implementations.
8786 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
8787 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
8788 // Therefore, we behave conservatively and only proceed if at least one of the
8789 // operands is known to not be zero or if we don't care about signed zero.
8790 switch (Pred) {
8791 default: break;
8792 case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
8793 case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
8794 if (!HasMismatchedZeros)
8795 break;
8796 [[fallthrough]];
8797 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
8798 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
8799 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
8800 !isKnownNonZero(CmpRHS))
8801 return {SPF_UNKNOWN, SPNB_NA, false};
8802 }
8803
8804 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
8805 bool Ordered = false;
8806
8807 // When given one NaN and one non-NaN input:
8808 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
8809 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
8810 // ordered comparison fails), which could be NaN or non-NaN.
8811 // so here we discover exactly what NaN behavior is required/accepted.
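 // Illustrative example: for "select (fcmp olt %x, %y), %x, %y" where only %y
 // is known non-NaN, the ordered compare is false when %x is NaN, so the
 // select returns %y (the non-NaN operand); this is SPNB_RETURNS_OTHER with
 // Ordered = true.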
8812 if (CmpInst::isFPPredicate(Pred)) {
8813 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
8814 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
8815
8816 if (LHSSafe && RHSSafe) {
8817 // Both operands are known non-NaN.
8818 NaNBehavior = SPNB_RETURNS_ANY;
8819 Ordered = CmpInst::isOrdered(Pred);
8820 } else if (CmpInst::isOrdered(Pred)) {
8821 // An ordered comparison will return false when given a NaN, so it
8822 // returns the RHS.
8823 Ordered = true;
8824 if (LHSSafe)
8825 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
8826 NaNBehavior = SPNB_RETURNS_NAN;
8827 else if (RHSSafe)
8828 NaNBehavior = SPNB_RETURNS_OTHER;
8829 else
8830 // Completely unsafe.
8831 return {SPF_UNKNOWN, SPNB_NA, false};
8832 } else {
8833 Ordered = false;
8834 // An unordered comparison will return true when given a NaN, so it
8835 // returns the LHS.
8836 if (LHSSafe)
8837 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
8838 NaNBehavior = SPNB_RETURNS_OTHER;
8839 else if (RHSSafe)
8840 NaNBehavior = SPNB_RETURNS_NAN;
8841 else
8842 // Completely unsafe.
8843 return {SPF_UNKNOWN, SPNB_NA, false};
8844 }
8845 }
8846
8847 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8848 std::swap(CmpLHS, CmpRHS);
8849 Pred = CmpInst::getSwappedPredicate(Pred);
8850 if (NaNBehavior == SPNB_RETURNS_NAN)
8851 NaNBehavior = SPNB_RETURNS_OTHER;
8852 else if (NaNBehavior == SPNB_RETURNS_OTHER)
8853 NaNBehavior = SPNB_RETURNS_NAN;
8854 Ordered = !Ordered;
8855 }
8856
8857 // ([if]cmp X, Y) ? X : Y
8858 if (TrueVal == CmpLHS && FalseVal == CmpRHS)
8859 return getSelectPattern(Pred, NaNBehavior, Ordered);
8860
8861 if (isKnownNegation(TrueVal, FalseVal)) {
8862 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
8863 // match against either LHS or sext(LHS).
8864 auto MaybeSExtCmpLHS =
8865 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
8866 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
8867 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
8868 if (match(TrueVal, MaybeSExtCmpLHS)) {
8869 // Set the return values. If the compare uses the negated value (-X >s 0),
8870 // swap the return values because the negated value is always 'RHS'.
8871 LHS = TrueVal;
8872 RHS = FalseVal;
8873 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
8874 std::swap(LHS, RHS);
8875
8876 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
8877 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
8878 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
8879 return {SPF_ABS, SPNB_NA, false};
8880
8881 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
8882 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
8883 return {SPF_ABS, SPNB_NA, false};
8884
8885 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
8886 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
8887 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
8888 return {SPF_NABS, SPNB_NA, false};
8889 }
8890 else if (match(FalseVal, MaybeSExtCmpLHS)) {
8891 // Set the return values. If the compare uses the negated value (-X >s 0),
8892 // swap the return values because the negated value is always 'RHS'.
8893 LHS = FalseVal;
8894 RHS = TrueVal;
8895 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
8896 std::swap(LHS, RHS);
8897
8898 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
8899 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
8900 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
8901 return {SPF_NABS, SPNB_NA, false};
8902
8903 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
8904 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
8905 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
8906 return {SPF_ABS, SPNB_NA, false};
8907 }
8908 }
8909
8910 if (CmpInst::isIntPredicate(Pred))
8911 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
8912
8913 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
8914 // may return either -0.0 or 0.0, so fcmp/select pair has stricter
8915 // semantics than minNum. Be conservative in such case.
8916 if (NaNBehavior != SPNB_RETURNS_ANY ||
8917 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
8918 !isKnownNonZero(CmpRHS)))
8919 return {SPF_UNKNOWN, SPNB_NA, false};
8920
8921 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
8922}
8923
8925 Instruction::CastOps *CastOp) {
8926 const DataLayout &DL = CmpI->getDataLayout();
8927
8928 Constant *CastedTo = nullptr;
8929 switch (*CastOp) {
8930 case Instruction::ZExt:
8931 if (CmpI->isUnsigned())
8932 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
8933 break;
8934 case Instruction::SExt:
8935 if (CmpI->isSigned())
8936 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
8937 break;
8938 case Instruction::Trunc:
8939 Constant *CmpConst;
8940 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
8941 CmpConst->getType() == SrcTy) {
8942 // Here we have the following case:
8943 //
8944 // %cond = cmp iN %x, CmpConst
8945 // %tr = trunc iN %x to iK
8946 // %narrowsel = select i1 %cond, iK %t, iK C
8947 //
8948 // We can always move trunc after select operation:
8949 //
8950 // %cond = cmp iN %x, CmpConst
8951 // %widesel = select i1 %cond, iN %x, iN CmpConst
8952 // %tr = trunc iN %widesel to iK
8953 //
8954 // Note that C could be extended in any way because we don't care about
8955 // upper bits after truncation. It can't be an abs pattern, because it would
8956 // look like:
8957 //
8958 // select i1 %cond, x, -x.
8959 //
8960 // So only a min/max pattern can be matched. Such a match requires the
8961 // widened C to equal CmpConst, so we set the widened C = CmpConst; the
8962 // condition trunc(CmpConst) == C is checked below.
8963 CastedTo = CmpConst;
8964 } else {
8965 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
8966 CastedTo = ConstantFoldCastOperand(ExtOp, C, SrcTy, DL);
8967 }
8968 break;
8969 case Instruction::FPTrunc:
8970 CastedTo = ConstantFoldCastOperand(Instruction::FPExt, C, SrcTy, DL);
8971 break;
8972 case Instruction::FPExt:
8973 CastedTo = ConstantFoldCastOperand(Instruction::FPTrunc, C, SrcTy, DL);
8974 break;
8975 case Instruction::FPToUI:
8976 CastedTo = ConstantFoldCastOperand(Instruction::UIToFP, C, SrcTy, DL);
8977 break;
8978 case Instruction::FPToSI:
8979 CastedTo = ConstantFoldCastOperand(Instruction::SIToFP, C, SrcTy, DL);
8980 break;
8981 case Instruction::UIToFP:
8982 CastedTo = ConstantFoldCastOperand(Instruction::FPToUI, C, SrcTy, DL);
8983 break;
8984 case Instruction::SIToFP:
8985 CastedTo = ConstantFoldCastOperand(Instruction::FPToSI, C, SrcTy, DL);
8986 break;
8987 default:
8988 break;
8989 }
8990
8991 if (!CastedTo)
8992 return nullptr;
8993
8994 // Make sure the cast doesn't lose any information.
8995 Constant *CastedBack =
8996 ConstantFoldCastOperand(*CastOp, CastedTo, C->getType(), DL);
8997 if (CastedBack && CastedBack != C)
8998 return nullptr;
8999
9000 return CastedTo;
9001}
9002
9003/// Helps to match a select pattern in case of a type mismatch.
9004///
9005/// The function handles the case when the type of the true and false values of
9006/// a select instruction differs from the type of the cmp instruction's operands
9007/// because of a cast instruction. The function checks if it is legal to move
9008/// the cast operation after "select". If yes, it returns the new second value
9009/// of "select" (with the assumption that the cast is moved):
9010/// 1. As the operand of the cast instruction when both values of "select" are
9011///    the same cast instruction.
9012/// 2. As a restored constant (by applying the reverse cast operation) when the
9013///    first value of the "select" is a cast operation and the second value is a
9014///    constant. This is implemented in lookThroughCastConst().
9015/// 3. When one operand is a cast instruction and the other is not: the operands
9016///    of sel(cmp) then have different integer types.
9017/// NOTE: We return only the new second value because the first value could be
9018/// accessed as operand of cast instruction.
9019static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
9020 Instruction::CastOps *CastOp) {
9021 auto *Cast1 = dyn_cast<CastInst>(V1);
9022 if (!Cast1)
9023 return nullptr;
9024
9025 *CastOp = Cast1->getOpcode();
9026 Type *SrcTy = Cast1->getSrcTy();
9027 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
9028 // If V1 and V2 are both the same cast from the same type, look through V1.
9029 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
9030 return Cast2->getOperand(0);
9031 return nullptr;
9032 }
9033
9034 auto *C = dyn_cast<Constant>(V2);
9035 if (C)
9036 return lookThroughCastConst(CmpI, SrcTy, C, CastOp);
9037
9038 Value *CastedTo = nullptr;
9039 if (*CastOp == Instruction::Trunc) {
9040 if (match(CmpI->getOperand(1), m_ZExtOrSExt(m_Specific(V2)))) {
9041 // Here we have the following case:
9042 // %y_ext = sext iK %y to iN
9043 // %cond = cmp iN %x, %y_ext
9044 // %tr = trunc iN %x to iK
9045 // %narrowsel = select i1 %cond, iK %tr, iK %y
9046 //
9047 // We can always move trunc after select operation:
9048 // %y_ext = sext iK %y to iN
9049 // %cond = cmp iN %x, %y_ext
9050 // %widesel = select i1 %cond, iN %x, iN %y_ext
9051 // %tr = trunc iN %widesel to iK
9052 assert(V2->getType() == Cast1->getType() &&
9053 "V2 and Cast1 should be the same type.");
9054 CastedTo = CmpI->getOperand(1);
9055 }
9056 }
9057
9058 return CastedTo;
9059}
9060SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
9061 Instruction::CastOps *CastOp,
9062 unsigned Depth) {
9063 if (Depth > MaxAnalysisRecursionDepth)
9064 return {SPF_UNKNOWN, SPNB_NA, false};
9065
9066 SelectInst *SI = dyn_cast<SelectInst>(V);
9067 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
9068
9069 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
9070 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
9071
9072 Value *TrueVal = SI->getTrueValue();
9073 Value *FalseVal = SI->getFalseValue();
9074
9075 return llvm::matchDecomposedSelectPattern(
9076 CmpI, TrueVal, FalseVal, LHS, RHS,
9077 isa<FPMathOperator>(SI) ? SI->getFastMathFlags() : FastMathFlags(),
9078 CastOp, Depth);
9079}
9080
9081SelectPatternResult llvm::matchDecomposedSelectPattern(
9082 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
9083 FastMathFlags FMF, Instruction::CastOps *CastOp, unsigned Depth) {
9084 CmpInst::Predicate Pred = CmpI->getPredicate();
9085 Value *CmpLHS = CmpI->getOperand(0);
9086 Value *CmpRHS = CmpI->getOperand(1);
9087 if (isa<FPMathOperator>(CmpI) && CmpI->hasNoNaNs())
9088 FMF.setNoNaNs();
9089
9090 // Bail out early.
9091 if (CmpI->isEquality())
9092 return {SPF_UNKNOWN, SPNB_NA, false};
9093
9094 // Deal with type mismatches.
9095 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
9096 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
9097 // If this is a potential fmin/fmax with a cast to integer, then ignore
9098 // -0.0 because there is no corresponding integer value.
9099 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9100 FMF.setNoSignedZeros();
9101 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9102 cast<CastInst>(TrueVal)->getOperand(0), C,
9103 LHS, RHS, Depth);
9104 }
9105 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
9106 // If this is a potential fmin/fmax with a cast to integer, then ignore
9107 // -0.0 because there is no corresponding integer value.
9108 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
9109 FMF.setNoSignedZeros();
9110 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
9111 C, cast<CastInst>(FalseVal)->getOperand(0),
9112 LHS, RHS, Depth);
9113 }
9114 }
9115 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
9116 LHS, RHS, Depth);
9117}
9118
9119CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
9120 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
9121 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
9122 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
9123 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
9124 if (SPF == SPF_FMINNUM)
9125 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
9126 if (SPF == SPF_FMAXNUM)
9127 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
9128 llvm_unreachable("unhandled!");
9129}
9130
9131Intrinsic::ID llvm::getMinMaxIntrinsic(SelectPatternFlavor SPF) {
9132 switch (SPF) {
9133 case SPF_UMIN:
9134 return Intrinsic::umin;
9135 case SPF_UMAX:
9136 return Intrinsic::umax;
9137 case SPF_SMIN:
9138 return Intrinsic::smin;
9139 case SPF_SMAX:
9140 return Intrinsic::smax;
9141 default:
9142 llvm_unreachable("Unexpected SPF");
9143 }
9144}
9145
9146SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
9147 if (SPF == SPF_SMIN) return SPF_SMAX;
9148 if (SPF == SPF_UMIN) return SPF_UMAX;
9149 if (SPF == SPF_SMAX) return SPF_SMIN;
9150 if (SPF == SPF_UMAX) return SPF_UMIN;
9151 llvm_unreachable("unhandled!");
9152}
9153
9154Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
9155 switch (MinMaxID) {
9156 case Intrinsic::smax: return Intrinsic::smin;
9157 case Intrinsic::smin: return Intrinsic::smax;
9158 case Intrinsic::umax: return Intrinsic::umin;
9159 case Intrinsic::umin: return Intrinsic::umax;
9160 // Please note that the following intrinsics may produce the same result for
9161 // the original and inverted case even if X != Y, because NaN is handled specially.
9162 case Intrinsic::maximum: return Intrinsic::minimum;
9163 case Intrinsic::minimum: return Intrinsic::maximum;
9164 case Intrinsic::maxnum: return Intrinsic::minnum;
9165 case Intrinsic::minnum: return Intrinsic::maxnum;
9166 case Intrinsic::maximumnum:
9167 return Intrinsic::minimumnum;
9168 case Intrinsic::minimumnum:
9169 return Intrinsic::maximumnum;
9170 default: llvm_unreachable("Unexpected intrinsic");
9171 }
9172}
9173
9174APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
9175 switch (SPF) {
9176 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
9177 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
9178 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
9179 case SPF_UMIN: return APInt::getMinValue(BitWidth);
9180 default: llvm_unreachable("Unexpected flavor");
9181 }
9182}
9183
9184std::pair<Intrinsic::ID, bool>
9186 // Check if VL contains select instructions that can be folded into a min/max
9187 // vector intrinsic and return the intrinsic if it is possible.
9188 // TODO: Support floating point min/max.
9189 bool AllCmpSingleUse = true;
9190 SelectPatternResult SelectPattern;
9191 SelectPattern.Flavor = SPF_UNKNOWN;
9192 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
9193 Value *LHS, *RHS;
9194 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
9195 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor))
9196 return false;
9197 if (SelectPattern.Flavor != SPF_UNKNOWN &&
9198 SelectPattern.Flavor != CurrentPattern.Flavor)
9199 return false;
9200 SelectPattern = CurrentPattern;
9201 AllCmpSingleUse &=
9203 return true;
9204 })) {
9205 switch (SelectPattern.Flavor) {
9206 case SPF_SMIN:
9207 return {Intrinsic::smin, AllCmpSingleUse};
9208 case SPF_UMIN:
9209 return {Intrinsic::umin, AllCmpSingleUse};
9210 case SPF_SMAX:
9211 return {Intrinsic::smax, AllCmpSingleUse};
9212 case SPF_UMAX:
9213 return {Intrinsic::umax, AllCmpSingleUse};
9214 case SPF_FMAXNUM:
9215 return {Intrinsic::maxnum, AllCmpSingleUse};
9216 case SPF_FMINNUM:
9217 return {Intrinsic::minnum, AllCmpSingleUse};
9218 default:
9219 llvm_unreachable("unexpected select pattern flavor");
9220 }
9221 }
9222 return {Intrinsic::not_intrinsic, false};
9223}
9224
9225template <typename InstTy>
9226static bool matchTwoInputRecurrence(const PHINode *PN, InstTy *&Inst,
9227 Value *&Init, Value *&OtherOp) {
9228 // Handle the case of a simple two-predecessor recurrence PHI.
9229 // There's a lot more that could theoretically be done here, but
9230 // this is sufficient to catch some interesting cases.
9231 // TODO: Expand list -- gep, uadd.sat etc.
9232 if (PN->getNumIncomingValues() != 2)
9233 return false;
9234
9235 for (unsigned I = 0; I != 2; ++I) {
9236 if (auto *Operation = dyn_cast<InstTy>(PN->getIncomingValue(I));
9237 Operation && Operation->getNumOperands() >= 2) {
9238 Value *LHS = Operation->getOperand(0);
9239 Value *RHS = Operation->getOperand(1);
9240 if (LHS != PN && RHS != PN)
9241 continue;
9242
9243 Inst = Operation;
9244 Init = PN->getIncomingValue(!I);
9245 OtherOp = (LHS == PN) ? RHS : LHS;
9246 return true;
9247 }
9248 }
9249 return false;
9250}
9251
9252bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
9253 Value *&Start, Value *&Step) {
9254 // We try to match a recurrence of the form:
9255 // %iv = [Start, %entry], [%iv.next, %backedge]
9256 // %iv.next = binop %iv, Step
9257 // Or:
9258 // %iv = [Start, %entry], [%iv.next, %backedge]
9259 // %iv.next = binop Step, %iv
9260 return matchTwoInputRecurrence(P, BO, Start, Step);
9261}
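// Illustrative example: for the canonical induction variable
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// this returns true with BO = %iv.next, Start = 0 and Step = 1.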
9262
9263bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
9264 Value *&Start, Value *&Step) {
9265 BinaryOperator *BO = nullptr;
9266 P = dyn_cast<PHINode>(I->getOperand(0));
9267 if (!P)
9268 P = dyn_cast<PHINode>(I->getOperand(1));
9269 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
9270}
9271
9272bool llvm::matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I,
9273 PHINode *&P, Value *&Init,
9274 Value *&OtherOp) {
9275 // Binary intrinsics only supported for now.
9276 if (I->arg_size() != 2 || I->getType() != I->getArgOperand(0)->getType() ||
9277 I->getType() != I->getArgOperand(1)->getType())
9278 return false;
9279
9280 IntrinsicInst *II = nullptr;
9281 P = dyn_cast<PHINode>(I->getArgOperand(0));
9282 if (!P)
9283 P = dyn_cast<PHINode>(I->getArgOperand(1));
9284
9285 return P && matchTwoInputRecurrence(P, II, Init, OtherOp) && II == I;
9286}
9287
9288/// Return true if "icmp Pred LHS RHS" is always true.
9289static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
9290 const Value *RHS) {
9291 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
9292 return true;
9293
9294 switch (Pred) {
9295 default:
9296 return false;
9297
9298 case CmpInst::ICMP_SLE: {
9299 const APInt *C;
9300
9301 // LHS s<= LHS +_{nsw} C if C >= 0
9302 // LHS s<= LHS | C if C >= 0
9303 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))) ||
9305 return !C->isNegative();
9306
9307 // LHS s<= smax(LHS, V) for any V
9309 return true;
9310
9311 // smin(RHS, V) s<= RHS for any V
9313 return true;
9314
9315 // Match A to (X +_{nsw} CA) and B to (X +_{nsw} CB)
9316 const Value *X;
9317 const APInt *CLHS, *CRHS;
9318 if (match(LHS, m_NSWAddLike(m_Value(X), m_APInt(CLHS))) &&
9319 match(RHS, m_NSWAddLike(m_Specific(X), m_APInt(CRHS))))
9320 return CLHS->sle(*CRHS);
9321
9322 return false;
9323 }
9324
9325 case CmpInst::ICMP_ULE: {
9326 // LHS u<= LHS +_{nuw} V for any V
9327 if (match(RHS, m_c_Add(m_Specific(LHS), m_Value())) &&
9328 cast<OverflowingBinaryOperator>(RHS)->hasNoUnsignedWrap())
9329 return true;
9330
9331 // LHS u<= LHS | V for any V
9332 if (match(RHS, m_c_Or(m_Specific(LHS), m_Value())))
9333 return true;
9334
9335 // LHS u<= umax(LHS, V) for any V
9337 return true;
9338
9339 // RHS >> V u<= RHS for any V
9340 if (match(LHS, m_LShr(m_Specific(RHS), m_Value())))
9341 return true;
9342
9343 // RHS u/ C_ugt_1 u<= RHS
9344 const APInt *C;
9345 if (match(LHS, m_UDiv(m_Specific(RHS), m_APInt(C))) && C->ugt(1))
9346 return true;
9347
9348 // RHS & V u<= RHS for any V
9350 return true;
9351
9352 // umin(RHS, V) u<= RHS for any V
9354 return true;
9355
9356 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
9357 const Value *X;
9358 const APInt *CLHS, *CRHS;
9359 if (match(LHS, m_NUWAddLike(m_Value(X), m_APInt(CLHS))) &&
9360 match(RHS, m_NUWAddLike(m_Specific(X), m_APInt(CRHS))))
9361 return CLHS->ule(*CRHS);
9362
9363 return false;
9364 }
9365 }
9366}
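// Illustrative example: "icmp ule %x, (add nuw %x, %v)" is always true, since
// an nuw add cannot wrap and therefore never produces a value that is
// unsigned-less-than %x.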
9367
9368/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
9369/// ALHS ARHS" is true. Otherwise, return std::nullopt.
9370static std::optional<bool>
9371isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
9372 const Value *ARHS, const Value *BLHS, const Value *BRHS) {
9373 switch (Pred) {
9374 default:
9375 return std::nullopt;
9376
9377 case CmpInst::ICMP_SLT:
9378 case CmpInst::ICMP_SLE:
9379 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS) &&
9380 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS))
9381 return true;
9382 return std::nullopt;
9383
9384 case CmpInst::ICMP_SGT:
9385 case CmpInst::ICMP_SGE:
9386 if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS) &&
9387 isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS))
9388 return true;
9389 return std::nullopt;
9390
9391 case CmpInst::ICMP_ULT:
9392 case CmpInst::ICMP_ULE:
9393 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS) &&
9394 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS))
9395 return true;
9396 return std::nullopt;
9397
9398 case CmpInst::ICMP_UGT:
9399 case CmpInst::ICMP_UGE:
9400 if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS) &&
9401 isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS))
9402 return true;
9403 return std::nullopt;
9404 }
9405}
9406
9407/// Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
9408/// Return false if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is false.
9409/// Otherwise, return std::nullopt if we can't infer anything.
9410static std::optional<bool>
9411isImpliedCondCommonOperandWithCR(CmpPredicate LPred, const ConstantRange &LCR,
9412 CmpPredicate RPred, const ConstantRange &RCR) {
9413 auto CRImpliesPred = [&](ConstantRange CR,
9414 CmpInst::Predicate Pred) -> std::optional<bool> {
9415 // If all true values for lhs and true for rhs, lhs implies rhs
9416 if (CR.icmp(Pred, RCR))
9417 return true;
9418
9419 // If there is no overlap, lhs implies not rhs
9420 if (CR.icmp(CmpInst::getInversePredicate(Pred), RCR))
9421 return false;
9422
9423 return std::nullopt;
9424 };
9425 if (auto Res = CRImpliesPred(ConstantRange::makeAllowedICmpRegion(LPred, LCR),
9426 RPred))
9427 return Res;
9428 if (LPred.hasSameSign() ^ RPred.hasSameSign()) {
9430 : LPred.dropSameSign();
9432 : RPred.dropSameSign();
9433 return CRImpliesPred(ConstantRange::makeAllowedICmpRegion(LPred, LCR),
9434 RPred);
9435 }
9436 return std::nullopt;
9437}
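// Illustrative example: "x u< 5" implies "x u< 10" is true (the region [0, 5)
// lies inside [0, 10)), while "x u< 5" implies "x u> 20" is false (the regions
// [0, 5) and (20, UINT_MAX] do not overlap).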
9438
9439/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
9440/// is true. Return false if LHS implies RHS is false. Otherwise, return
9441/// std::nullopt if we can't infer anything.
9442static std::optional<bool>
9443isImpliedCondICmps(CmpPredicate LPred, const Value *L0, const Value *L1,
9444 CmpPredicate RPred, const Value *R0, const Value *R1,
9445 const DataLayout &DL, bool LHSIsTrue) {
9446 // The rest of the logic assumes the LHS condition is true. If that's not the
9447 // case, invert the predicate to make it so.
9448 if (!LHSIsTrue)
9449 LPred = ICmpInst::getInverseCmpPredicate(LPred);
9450
9451 // We can have non-canonical operands, so try to normalize any common operand
9452 // to L0/R0.
9453 if (L0 == R1) {
9454 std::swap(R0, R1);
9455 RPred = ICmpInst::getSwappedCmpPredicate(RPred);
9456 }
9457 if (R0 == L1) {
9458 std::swap(L0, L1);
9459 LPred = ICmpInst::getSwappedCmpPredicate(LPred);
9460 }
9461 if (L1 == R1) {
9462 // If we have L0 == R0 and L1 == R1, then make L1/R1 the constants.
9463 if (L0 != R0 || match(L0, m_ImmConstant())) {
9464 std::swap(L0, L1);
9465 LPred = ICmpInst::getSwappedCmpPredicate(LPred);
9466 std::swap(R0, R1);
9467 RPred = ICmpInst::getSwappedCmpPredicate(RPred);
9468 }
9469 }
9470
9471 // See if we can infer anything if operand-0 matches and we have at least one
9472 // constant.
9473 const APInt *Unused;
9474 if (L0 == R0 && (match(L1, m_APInt(Unused)) || match(R1, m_APInt(Unused)))) {
9475 // Potential TODO: We could also further use the constant range of L0/R0 to
9476 // further constraint the constant ranges. At the moment this leads to
9477 // several regressions related to not transforming `multi_use(A + C0) eq/ne
9478 // C1` (see discussion: D58633).
9480 L1, ICmpInst::isSigned(LPred), /* UseInstrInfo=*/true, /*AC=*/nullptr,
9481 /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1);
9483 R1, ICmpInst::isSigned(RPred), /* UseInstrInfo=*/true, /*AC=*/nullptr,
9484 /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1);
9485 // Even if L1/R1 are not both constant, we can still sometimes deduce
9486 // relationship from a single constant. For example X u> Y implies X != 0.
9487 if (auto R = isImpliedCondCommonOperandWithCR(LPred, LCR, RPred, RCR))
9488 return R;
9489 // If both L1/R1 were exact constant ranges and we didn't get anything
9490 // here, we won't be able to deduce this.
9491 if (match(L1, m_APInt(Unused)) && match(R1, m_APInt(Unused)))
9492 return std::nullopt;
9493 }
9494
9495 // Can we infer anything when the two compares have matching operands?
9496 if (L0 == R0 && L1 == R1)
9497 return ICmpInst::isImpliedByMatchingCmp(LPred, RPred);
9498
9499 // It only really makes sense in the context of signed comparison for "X - Y
9500 // must be positive if X >= Y and no overflow".
9501 // Take SGT as an example: L0:x > L1:y and C >= 0
9502 // ==> R0:(x -nsw y) < R1:(-C) is false
9503 CmpInst::Predicate SignedLPred = LPred.getPreferredSignedPredicate();
9504 if ((SignedLPred == ICmpInst::ICMP_SGT ||
9505 SignedLPred == ICmpInst::ICMP_SGE) &&
9506 match(R0, m_NSWSub(m_Specific(L0), m_Specific(L1)))) {
9507 if (match(R1, m_NonPositive()) &&
9508 ICmpInst::isImpliedByMatchingCmp(SignedLPred, RPred) == false)
9509 return false;
9510 }
9511
9512 // Take SLT as an example: L0:x < L1:y and C <= 0
9513 // ==> R0:(x -nsw y) < R1:(-C) is true
9514 if ((SignedLPred == ICmpInst::ICMP_SLT ||
9515 SignedLPred == ICmpInst::ICMP_SLE) &&
9516 match(R0, m_NSWSub(m_Specific(L0), m_Specific(L1)))) {
9517 if (match(R1, m_NonNegative()) &&
9518 ICmpInst::isImpliedByMatchingCmp(SignedLPred, RPred) == true)
9519 return true;
9520 }
9521
9522 // a - b == NonZero -> a != b
9523 // ptrtoint(a) - ptrtoint(b) == NonZero -> a != b
9524 const APInt *L1C;
9525 Value *A, *B;
9526 if (LPred == ICmpInst::ICMP_EQ && ICmpInst::isEquality(RPred) &&
9527 match(L1, m_APInt(L1C)) && !L1C->isZero() &&
9528 match(L0, m_Sub(m_Value(A), m_Value(B))) &&
9529 ((A == R0 && B == R1) || (A == R1 && B == R0) ||
9534 return RPred.dropSameSign() == ICmpInst::ICMP_NE;
9535 }
9536
9537 // L0 = R0 = L1 + R1, L0 >=u L1 implies R0 >=u R1, L0 <u L1 implies R0 <u R1
9538 if (L0 == R0 &&
9539 (LPred == ICmpInst::ICMP_ULT || LPred == ICmpInst::ICMP_UGE) &&
9540 (RPred == ICmpInst::ICMP_ULT || RPred == ICmpInst::ICMP_UGE) &&
9541 match(L0, m_c_Add(m_Specific(L1), m_Specific(R1))))
9542 return CmpPredicate::getMatching(LPred, RPred).has_value();
9543
9544 if (auto P = CmpPredicate::getMatching(LPred, RPred))
9545 return isImpliedCondOperands(*P, L0, L1, R0, R1);
9546
9547 return std::nullopt;
9548}
9549
9550/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
9551/// is true. Return false if LHS implies RHS is false. Otherwise, return
9552/// std::nullopt if we can't infer anything.
9553static std::optional<bool>
9555 FCmpInst::Predicate RPred, const Value *R0, const Value *R1,
9556 const DataLayout &DL, bool LHSIsTrue) {
9557 // The rest of the logic assumes the LHS condition is true. If that's not the
9558 // case, invert the predicate to make it so.
9559 if (!LHSIsTrue)
9560 LPred = FCmpInst::getInversePredicate(LPred);
9561
9562 // We can have non-canonical operands, so try to normalize any common operand
9563 // to L0/R0.
9564 if (L0 == R1) {
9565 std::swap(R0, R1);
9566 RPred = FCmpInst::getSwappedPredicate(RPred);
9567 }
9568 if (R0 == L1) {
9569 std::swap(L0, L1);
9570 LPred = FCmpInst::getSwappedPredicate(LPred);
9571 }
9572 if (L1 == R1) {
9573 // If we have L0 == R0 and L1 == R1, then make L1/R1 the constants.
9574 if (L0 != R0 || match(L0, m_ImmConstant())) {
9575 std::swap(L0, L1);
9576 LPred = ICmpInst::getSwappedCmpPredicate(LPred);
9577 std::swap(R0, R1);
9578 RPred = ICmpInst::getSwappedCmpPredicate(RPred);
9579 }
9580 }
9581
9582 // Can we infer anything when the two compares have matching operands?
9583 if (L0 == R0 && L1 == R1) {
9584 if ((LPred & RPred) == LPred)
9585 return true;
9586 if ((LPred & ~RPred) == LPred)
9587 return false;
9588 }
9589
9590 // See if we can infer anything if operand-0 matches and we have at least one
9591 // constant.
9592 const APFloat *L1C, *R1C;
9593 if (L0 == R0 && match(L1, m_APFloat(L1C)) && match(R1, m_APFloat(R1C))) {
9594 if (std::optional<ConstantFPRange> DomCR =
9596 if (std::optional<ConstantFPRange> ImpliedCR =
9598 if (ImpliedCR->contains(*DomCR))
9599 return true;
9600 }
9601 if (std::optional<ConstantFPRange> ImpliedCR =
9603 FCmpInst::getInversePredicate(RPred), *R1C)) {
9604 if (ImpliedCR->contains(*DomCR))
9605 return false;
9606 }
9607 }
9608 }
9609
9610 return std::nullopt;
9611}
9612
9613/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
9614/// false. Otherwise, return std::nullopt if we can't infer anything. We
9615/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
9616/// instruction.
9617static std::optional<bool>
9618isImpliedCondAndOr(const Instruction *LHS, CmpPredicate RHSPred,
9619 const Value *RHSOp0, const Value *RHSOp1,
9620 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
9621 // The LHS must be an 'or', 'and', or a 'select' instruction.
9622 assert((LHS->getOpcode() == Instruction::And ||
9623 LHS->getOpcode() == Instruction::Or ||
9624 LHS->getOpcode() == Instruction::Select) &&
9625 "Expected LHS to be 'and', 'or', or 'select'.");
9626
9627 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
9628
9629 // If the result of an 'or' is false, then we know both legs of the 'or' are
9630 // false. Similarly, if the result of an 'and' is true, then we know both
9631 // legs of the 'and' are true.
9632 const Value *ALHS, *ARHS;
9633 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
9634 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
9635 // FIXME: Make this non-recursion.
9636 if (std::optional<bool> Implication = isImpliedCondition(
9637 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9638 return Implication;
9639 if (std::optional<bool> Implication = isImpliedCondition(
9640 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9641 return Implication;
9642 return std::nullopt;
9643 }
9644 return std::nullopt;
9645}
9646
9647std::optional<bool>
9648llvm::isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
9649 const Value *RHSOp0, const Value *RHSOp1,
9650 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
9651 // Bail out when we hit the limit.
9652 if (Depth == MaxAnalysisRecursionDepth)
9653 return std::nullopt;
9654
9655 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
9656 // example.
9657 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
9658 return std::nullopt;
9659
9660 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
9661 "Expected integer type only!");
9662
9663 // Match not
9664 if (match(LHS, m_Not(m_Value(LHS))))
9665 LHSIsTrue = !LHSIsTrue;
9666
9667 // Both LHS and RHS are icmps.
9668 if (RHSOp0->getType()->getScalarType()->isIntOrPtrTy()) {
9669 if (const auto *LHSCmp = dyn_cast<ICmpInst>(LHS))
9670 return isImpliedCondICmps(LHSCmp->getCmpPredicate(),
9671 LHSCmp->getOperand(0), LHSCmp->getOperand(1),
9672 RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue);
9673 const Value *V;
9674 if (match(LHS, m_NUWTrunc(m_Value(V))))
9675 return isImpliedCondICmps(CmpInst::ICMP_NE, V,
9676 ConstantInt::get(V->getType(), 0), RHSPred,
9677 RHSOp0, RHSOp1, DL, LHSIsTrue);
9678 } else {
9679 assert(RHSOp0->getType()->isFPOrFPVectorTy() &&
9680 "Expected floating point type only!");
9681 if (const auto *LHSCmp = dyn_cast<FCmpInst>(LHS))
9682 return isImpliedCondFCmps(LHSCmp->getPredicate(), LHSCmp->getOperand(0),
9683 LHSCmp->getOperand(1), RHSPred, RHSOp0, RHSOp1,
9684 DL, LHSIsTrue);
9685 }
9686
9687 /// The LHS should be an 'or', 'and', or a 'select' instruction. We expect
9688 /// the RHS to be an icmp.
9689 /// FIXME: Add support for and/or/select on the RHS.
9690 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
9691 if ((LHSI->getOpcode() == Instruction::And ||
9692 LHSI->getOpcode() == Instruction::Or ||
9693 LHSI->getOpcode() == Instruction::Select))
9694 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
9695 Depth);
9696 }
9697 return std::nullopt;
9698}
9699
9700std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
9701 const DataLayout &DL,
9702 bool LHSIsTrue, unsigned Depth) {
9703 // LHS ==> RHS by definition
9704 if (LHS == RHS)
9705 return LHSIsTrue;
9706
9707 // Match not
9708 bool InvertRHS = false;
9709 if (match(RHS, m_Not(m_Value(RHS)))) {
9710 if (LHS == RHS)
9711 return !LHSIsTrue;
9712 InvertRHS = true;
9713 }
9714
9715 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS)) {
9716 if (auto Implied = isImpliedCondition(
9717 LHS, RHSCmp->getCmpPredicate(), RHSCmp->getOperand(0),
9718 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9719 return InvertRHS ? !*Implied : *Implied;
9720 return std::nullopt;
9721 }
9722 if (const FCmpInst *RHSCmp = dyn_cast<FCmpInst>(RHS)) {
9723 if (auto Implied = isImpliedCondition(
9724 LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0),
9725 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9726 return InvertRHS ? !*Implied : *Implied;
9727 return std::nullopt;
9728 }
9729
9730 const Value *V;
9731 if (match(RHS, m_NUWTrunc(m_Value(V)))) {
9732 if (auto Implied = isImpliedCondition(LHS, CmpInst::ICMP_NE, V,
9733 ConstantInt::get(V->getType(), 0), DL,
9734 LHSIsTrue, Depth))
9735 return InvertRHS ? !*Implied : *Implied;
9736 return std::nullopt;
9737 }
9738
9739 if (Depth == MaxAnalysisRecursionDepth)
9740 return std::nullopt;
9741
9742 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
9743 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
9744 const Value *RHS1, *RHS2;
9745 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
9746 if (std::optional<bool> Imp =
9747 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
9748 if (*Imp == true)
9749 return !InvertRHS;
9750 if (std::optional<bool> Imp =
9751 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
9752 if (*Imp == true)
9753 return !InvertRHS;
9754 }
9755 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
9756 if (std::optional<bool> Imp =
9757 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
9758 if (*Imp == false)
9759 return InvertRHS;
9760 if (std::optional<bool> Imp =
9761 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
9762 if (*Imp == false)
9763 return InvertRHS;
9764 }
9765
9766 return std::nullopt;
9767}
9768
9769// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
9770// condition dominating ContextI, or nullptr if no condition is found.
9771static std::pair<Value *, bool>
9772getDomPredecessorCondition(const Instruction *ContextI) {
9773 if (!ContextI || !ContextI->getParent())
9774 return {nullptr, false};
9775
9776 // TODO: This is a poor/cheap way to determine dominance. Should we use a
9777 // dominator tree (eg, from a SimplifyQuery) instead?
9778 const BasicBlock *ContextBB = ContextI->getParent();
9779 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
9780 if (!PredBB)
9781 return {nullptr, false};
9782
9783 // We need a conditional branch in the predecessor.
9784 Value *PredCond;
9785 BasicBlock *TrueBB, *FalseBB;
9786 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
9787 return {nullptr, false};
9788
9789 // The branch should get simplified. Don't bother simplifying this condition.
9790 if (TrueBB == FalseBB)
9791 return {nullptr, false};
9792
9793 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9794 "Predecessor block does not point to successor?");
9795
9796 // Is this condition implied by the predecessor condition?
9797 return {PredCond, TrueBB == ContextBB};
9798}
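// Illustrative example: if the block containing ContextI has a single
// predecessor ending in "br i1 %c, label %then, label %else", this returns
// {%c, true} when ContextI is in %then and {%c, false} when it is in %else.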
9799
9800std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
9801 const Instruction *ContextI,
9802 const DataLayout &DL) {
9803 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
9804 auto PredCond = getDomPredecessorCondition(ContextI);
9805 if (PredCond.first)
9806 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
9807 return std::nullopt;
9808}
9809
9810std::optional<bool> llvm::isImpliedByDomCondition(CmpPredicate Pred,
9811 const Value *LHS,
9812 const Value *RHS,
9813 const Instruction *ContextI,
9814 const DataLayout &DL) {
9815 auto PredCond = getDomPredecessorCondition(ContextI);
9816 if (PredCond.first)
9817 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
9818 PredCond.second);
9819 return std::nullopt;
9820}
9821
9822static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
9823 APInt &Upper, const InstrInfoQuery &IIQ,
9824 bool PreferSignedRange) {
9825 unsigned Width = Lower.getBitWidth();
9826 const APInt *C;
9827 switch (BO.getOpcode()) {
9828 case Instruction::Sub:
9829 if (match(BO.getOperand(0), m_APInt(C))) {
9830 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
9831 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
9832
9833 // If the caller expects a signed compare, then try to use a signed range.
9834 // Otherwise if both no-wraps are set, use the unsigned range because it
9835 // is never larger than the signed range. Example:
9836 // "sub nuw nsw i8 -2, x" is unsigned [0, 254] vs. signed [-128, 126].
9837 // "sub nuw nsw i8 2, x" is unsigned [0, 2] vs. signed [-125, 127].
9838 if (PreferSignedRange && HasNSW && HasNUW)
9839 HasNUW = false;
9840
9841 if (HasNUW) {
9842 // 'sub nuw c, x' produces [0, C].
9843 Upper = *C + 1;
9844 } else if (HasNSW) {
9845 if (C->isNegative()) {
9846 // 'sub nsw -C, x' produces [SINT_MIN, -C - SINT_MIN].
9847 Lower = APInt::getSignedMinValue(Width);
9848 Upper = *C - APInt::getSignedMaxValue(Width);
9849 } else {
9850 // Note that sub 0, INT_MIN is not NSW. It technically is a signed wrap
9851 // 'sub nsw C, x' produces [C - SINT_MAX, SINT_MAX].
9852 Lower = *C - APInt::getSignedMaxValue(Width);
9853 Upper = APInt::getSignedMaxValue(Width) + 1;
9854 }
9855 }
9856 }
9857 break;
9858 case Instruction::Add:
9859 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
9860 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
9861 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
9862
9863 // If the caller expects a signed compare, then try to use a signed
9864 // range. Otherwise if both no-wraps are set, use the unsigned range
9865 // because it is never larger than the signed range. Example: "add nuw
9866 // nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
9867 if (PreferSignedRange && HasNSW && HasNUW)
9868 HasNUW = false;
9869
9870 if (HasNUW) {
9871 // 'add nuw x, C' produces [C, UINT_MAX].
9872 Lower = *C;
9873 } else if (HasNSW) {
9874 if (C->isNegative()) {
9875 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
9876 Lower = APInt::getSignedMinValue(Width);
9877 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
9878 } else {
9879 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
9880 Lower = APInt::getSignedMinValue(Width) + *C;
9881 Upper = APInt::getSignedMaxValue(Width) + 1;
9882 }
9883 }
9884 }
9885 break;
9886
9887 case Instruction::And:
9888 if (match(BO.getOperand(1), m_APInt(C)))
9889 // 'and x, C' produces [0, C].
9890 Upper = *C + 1;
9891 // X & -X is a power of two or zero. So we can cap the value at max power of
9892 // two.
9893 if (match(BO.getOperand(0), m_Neg(m_Specific(BO.getOperand(1)))) ||
9894 match(BO.getOperand(1), m_Neg(m_Specific(BO.getOperand(0)))))
9895 Upper = APInt::getSignedMinValue(Width) + 1;
9896 break;
9897
9898 case Instruction::Or:
9899 if (match(BO.getOperand(1), m_APInt(C)))
9900 // 'or x, C' produces [C, UINT_MAX].
9901 Lower = *C;
9902 break;
9903
9904 case Instruction::AShr:
9905 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
9906 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
9907 Lower = APInt::getSignedMinValue(Width).ashr(*C);
9908 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
9909 } else if (match(BO.getOperand(0), m_APInt(C))) {
9910 unsigned ShiftAmount = Width - 1;
9911 if (!C->isZero() && IIQ.isExact(&BO))
9912 ShiftAmount = C->countr_zero();
9913 if (C->isNegative()) {
9914 // 'ashr C, x' produces [C, C >> (Width-1)]
9915 Lower = *C;
9916 Upper = C->ashr(ShiftAmount) + 1;
9917 } else {
9918 // 'ashr C, x' produces [C >> (Width-1), C]
9919 Lower = C->ashr(ShiftAmount);
9920 Upper = *C + 1;
9921 }
9922 }
9923 break;
9924
9925 case Instruction::LShr:
9926 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
9927 // 'lshr x, C' produces [0, UINT_MAX >> C].
9928 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
9929 } else if (match(BO.getOperand(0), m_APInt(C))) {
9930 // 'lshr C, x' produces [C >> (Width-1), C].
9931 unsigned ShiftAmount = Width - 1;
9932 if (!C->isZero() && IIQ.isExact(&BO))
9933 ShiftAmount = C->countr_zero();
9934 Lower = C->lshr(ShiftAmount);
9935 Upper = *C + 1;
9936 }
9937 break;
9938
9939 case Instruction::Shl:
9940 if (match(BO.getOperand(0), m_APInt(C))) {
9941 if (IIQ.hasNoUnsignedWrap(&BO)) {
9942 // 'shl nuw C, x' produces [C, C << CLZ(C)]
9943 Lower = *C;
9944 Upper = Lower.shl(Lower.countl_zero()) + 1;
9945 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
9946 if (C->isNegative()) {
9947 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
9948 unsigned ShiftAmount = C->countl_one() - 1;
9949 Lower = C->shl(ShiftAmount);
9950 Upper = *C + 1;
9951 } else {
9952 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
9953 unsigned ShiftAmount = C->countl_zero() - 1;
9954 Lower = *C;
9955 Upper = C->shl(ShiftAmount) + 1;
9956 }
9957 } else {
9958 // If lowbit is set, value can never be zero.
9959 if ((*C)[0])
9960 Lower = APInt::getOneBitSet(Width, 0);
9961 // If we are shifting a constant the largest it can be is if the longest
9962 // sequence of consecutive ones is shifted to the highbits (breaking
9963 // ties for which sequence is higher). At the moment we take a liberal
9964 // upper bound on this by just popcounting the constant.
9965 // TODO: There may be a bitwise trick to find the longest/highest
9966 // consecutive sequence of ones (the naive method is an O(Width) loop).
9967 Upper = APInt::getHighBitsSet(Width, C->popcount()) + 1;
9968 }
9969 } else if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
9970 Upper = APInt::getBitsSetFrom(Width, C->getZExtValue()) + 1;
9971 }
9972 break;
9973
9974 case Instruction::SDiv:
9975 if (match(BO.getOperand(1), m_APInt(C))) {
9976 APInt IntMin = APInt::getSignedMinValue(Width);
9977 APInt IntMax = APInt::getSignedMaxValue(Width);
9978 if (C->isAllOnes()) {
9979 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
9980 // where C != -1 and C != 0 and C != 1
9981 Lower = IntMin + 1;
9982 Upper = IntMax + 1;
9983 } else if (C->countl_zero() < Width - 1) {
9984 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
9985 // where C != -1 and C != 0 and C != 1
9986 Lower = IntMin.sdiv(*C);
9987 Upper = IntMax.sdiv(*C);
9988 if (Lower.sgt(Upper))
9989 std::swap(Lower, Upper);
9990 Upper = Upper + 1;
9991 assert(Upper != Lower && "Upper part of range has wrapped!");
9992 }
9993 } else if (match(BO.getOperand(0), m_APInt(C))) {
9994 if (C->isMinSignedValue()) {
9995 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
9996 Lower = *C;
9997 Upper = Lower.lshr(1) + 1;
9998 } else {
9999 // 'sdiv C, x' produces [-|C|, |C|].
10000 Upper = C->abs() + 1;
10001 Lower = (-Upper) + 1;
10002 }
10003 }
10004 break;
10005
10006 case Instruction::UDiv:
10007 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
10008 // 'udiv x, C' produces [0, UINT_MAX / C].
10009 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
10010 } else if (match(BO.getOperand(0), m_APInt(C))) {
10011 // 'udiv C, x' produces [0, C].
10012 Upper = *C + 1;
10013 }
10014 break;
10015
10016 case Instruction::SRem:
10017 if (match(BO.getOperand(1), m_APInt(C))) {
10018 // 'srem x, C' produces (-|C|, |C|).
10019 Upper = C->abs();
10020 Lower = (-Upper) + 1;
10021 } else if (match(BO.getOperand(0), m_APInt(C))) {
10022 if (C->isNegative()) {
10023 // 'srem -|C|, x' produces [-|C|, 0].
10024 Upper = 1;
10025 Lower = *C;
10026 } else {
10027 // 'srem |C|, x' produces [0, |C|].
10028 Upper = *C + 1;
10029 }
10030 }
10031 break;
10032
10033 case Instruction::URem:
10034 if (match(BO.getOperand(1), m_APInt(C)))
10035 // 'urem x, C' produces [0, C).
10036 Upper = *C;
10037 else if (match(BO.getOperand(0), m_APInt(C)))
10038 // 'urem C, x' produces [0, C].
10039 Upper = *C + 1;
10040 break;
10041
10042 default:
10043 break;
10044 }
10045}
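// Editor's note: the following helper is an illustrative sketch, not part of
// the upstream ValueTracking.cpp; the function name is made up. It shows how
// the [Lower, Upper) pair computed by setLimitsForBinOp corresponds to a
// ConstantRange. For 'udiv i8 %x, 3' it computes Upper = 255/3 + 1 = 86, so
// the resulting range is [0, 86) with unsigned max 85. Assumes Divisor != 0.
static ConstantRange exampleUDivByConstRange(unsigned Width, uint64_t Divisor) {
  APInt C(Width, Divisor);
  // 'udiv x, C' produces [0, UINT_MAX / C], i.e. the half-open range
  // [0, UINT_MAX / C + 1).
  APInt Upper = APInt::getMaxValue(Width).udiv(C) + 1;
  return ConstantRange::getNonEmpty(APInt::getZero(Width), Upper);
}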
10046
10047 static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II,
10048 bool UseInstrInfo) {
10049 unsigned Width = II.getType()->getScalarSizeInBits();
10050 const APInt *C;
10051 switch (II.getIntrinsicID()) {
10052 case Intrinsic::ctlz:
10053 case Intrinsic::cttz: {
10054 APInt Upper(Width, Width);
10055 if (!UseInstrInfo || !match(II.getArgOperand(1), m_One()))
10056 Upper += 1;
10057 // Maximum of set/clear bits is the bit width.
10058 return ConstantRange::getNonEmpty(APInt::getZero(Width), Upper);
10059 }
10060 case Intrinsic::ctpop:
10061 // Maximum of set/clear bits is the bit width.
10062 return ConstantRange::getNonEmpty(APInt::getZero(Width),
10063 APInt(Width, Width) + 1);
10064 case Intrinsic::uadd_sat:
10065 // uadd.sat(x, C) produces [C, UINT_MAX].
10066 if (match(II.getOperand(0), m_APInt(C)) ||
10067 match(II.getOperand(1), m_APInt(C)))
10068 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
10069 break;
10070 case Intrinsic::sadd_sat:
10071 if (match(II.getOperand(0), m_APInt(C)) ||
10072 match(II.getOperand(1), m_APInt(C))) {
10073 if (C->isNegative())
10074 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
10075 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
10076 APInt::getSignedMaxValue(Width) + *C +
10077 1);
10078
10079 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
10080 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C,
10081 APInt::getSignedMaxValue(Width) + 1);
10082 }
10083 break;
10084 case Intrinsic::usub_sat:
10085 // usub.sat(C, x) produces [0, C].
10086 if (match(II.getOperand(0), m_APInt(C)))
10087 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
10088
10089 // usub.sat(x, C) produces [0, UINT_MAX - C].
10090 if (match(II.getOperand(1), m_APInt(C)))
10091 return ConstantRange::getNonEmpty(APInt::getZero(Width),
10092 APInt::getMaxValue(Width) - *C + 1);
10093 break;
10094 case Intrinsic::ssub_sat:
10095 if (match(II.getOperand(0), m_APInt(C))) {
10096 if (C->isNegative())
10097 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
10098 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
10099 *C - APInt::getSignedMinValue(Width) +
10100 1);
10101
10102 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
10103 return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
10104 APInt::getSignedMaxValue(Width) + 1);
10105 } else if (match(II.getOperand(1), m_APInt(C))) {
10106 if (C->isNegative())
10107 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
10108 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
10109 APInt::getSignedMaxValue(Width) + 1);
10110
10111 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
10112 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
10113 APInt::getSignedMaxValue(Width) - *C +
10114 1);
10115 }
10116 break;
10117 case Intrinsic::umin:
10118 case Intrinsic::umax:
10119 case Intrinsic::smin:
10120 case Intrinsic::smax:
10121 if (!match(II.getOperand(0), m_APInt(C)) &&
10122 !match(II.getOperand(1), m_APInt(C)))
10123 break;
10124
10125 switch (II.getIntrinsicID()) {
10126 case Intrinsic::umin:
10127 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
10128 case Intrinsic::umax:
10129 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
10130 case Intrinsic::smin:
10131 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
10132 *C + 1);
10133 case Intrinsic::smax:
10134 return ConstantRange::getNonEmpty(*C,
10135 APInt::getSignedMaxValue(Width) + 1);
10136 default:
10137 llvm_unreachable("Must be min/max intrinsic");
10138 }
10139 break;
10140 case Intrinsic::abs:
10141 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
10142 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
10143 if (match(II.getOperand(1), m_One()))
10144 return ConstantRange::getNonEmpty(APInt::getZero(Width),
10145 APInt::getSignedMaxValue(Width) + 1);
10146
10147 return ConstantRange::getNonEmpty(APInt::getZero(Width),
10148 APInt::getSignedMinValue(Width) + 1);
10149 case Intrinsic::vscale:
10150 if (!II.getParent() || !II.getFunction())
10151 break;
10152 return getVScaleRange(II.getFunction(), Width);
10153 default:
10154 break;
10155 }
10156
10157 return ConstantRange::getFull(Width);
10158}
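// Editor's note: illustrative sketch, not part of the upstream file; the
// helper name is hypothetical. For an i8 ctpop the case above produces the
// range [0, 8], i.e. the half-open ConstantRange [0, 9).
static ConstantRange exampleCtpopRange(unsigned Width) {
  // The number of set bits is at most the bit width itself.
  return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                    APInt(Width, Width) + 1);
}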
10159
10160 static ConstantRange getRangeForSelectPattern(const SelectInst &SI,
10161 const InstrInfoQuery &IIQ) {
10162 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
10163 const Value *LHS = nullptr, *RHS = nullptr;
10164 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
10165 if (R.Flavor == SPF_UNKNOWN)
10166 return ConstantRange::getFull(BitWidth);
10167
10168 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
10169 // If the negation part of the abs (in RHS) has the NSW flag,
10170 // then the result of abs(X) is [0..SIGNED_MAX],
10171 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
10172 if (match(RHS, m_Neg(m_Specific(LHS))) &&
10173 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
10174 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth),
10175 APInt::getSignedMaxValue(BitWidth) + 1);
10176
10177 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth),
10178 APInt::getSignedMinValue(BitWidth) + 1);
10179 }
10180
10181 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
10182 // The result of -abs(X) is <= 0.
10183 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
10184 APInt(BitWidth, 1));
10185 }
10186
10187 const APInt *C;
10188 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
10189 return ConstantRange::getFull(BitWidth);
10190
10191 switch (R.Flavor) {
10192 case SPF_UMIN:
10193 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth), *C + 1);
10194 case SPF_UMAX:
10195 return ConstantRange::getNonEmpty(*C, APInt::getZero(BitWidth));
10196 case SPF_SMIN:
10197 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
10198 *C + 1);
10199 case SPF_SMAX:
10200 return ConstantRange::getNonEmpty(*C,
10201 APInt::getSignedMaxValue(BitWidth) + 1);
10202 default:
10203 return ConstantRange::getFull(BitWidth);
10204 }
10205}
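// Editor's note: illustrative sketch, not part of the upstream file; the
// helper name is hypothetical. For a select recognized as SPF_UMIN with one
// constant arm, e.g. 'select (icmp ult i8 %x, 42), i8 %x, i8 42', the switch
// above yields the half-open range [0, C + 1).
static ConstantRange exampleUMinWithConstRange(unsigned BitWidth, uint64_t CVal) {
  APInt C(BitWidth, CVal);
  return ConstantRange::getNonEmpty(APInt::getZero(BitWidth), C + 1);
}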
10206
10207 static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
10208 // The maximum representable value of a half is 65504. For floats the maximum
10209 // value is 3.4e38 which requires roughly 129 bits.
10210 unsigned BitWidth = I->getType()->getScalarSizeInBits();
10211 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
10212 return;
10213 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
10214 Lower = APInt(BitWidth, -65504, true);
10215 Upper = APInt(BitWidth, 65505);
10216 }
10217
10218 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
10219 // For a fptoui the lower limit is left as 0.
10220 Upper = APInt(BitWidth, 65505);
10221 }
10222}
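// Editor's note: illustrative sketch, not part of the upstream file; the
// helper name is hypothetical. For 'fptosi half %h to i32' the limits above
// give [-65504, 65504], i.e. the half-open range [-65504, 65505). Assumes
// BitWidth >= 17 so both bounds are representable.
static ConstantRange exampleHalfToSIntRange(unsigned BitWidth) {
  return ConstantRange::getNonEmpty(APInt(BitWidth, -65504, /*isSigned=*/true),
                                    APInt(BitWidth, 65505));
}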
10223
10224 ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
10225 bool UseInstrInfo, AssumptionCache *AC,
10226 const Instruction *CtxI,
10227 const DominatorTree *DT,
10228 unsigned Depth) {
10229 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
10230
10231 if (Depth == MaxAnalysisRecursionDepth)
10232 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
10233
10234 if (auto *C = dyn_cast<Constant>(V))
10235 return C->toConstantRange();
10236
10237 unsigned BitWidth = V->getType()->getScalarSizeInBits();
10238 InstrInfoQuery IIQ(UseInstrInfo);
10239 ConstantRange CR = ConstantRange::getFull(BitWidth);
10240 if (auto *BO = dyn_cast<BinaryOperator>(V)) {
10241 APInt Lower = APInt(BitWidth, 0);
10242 APInt Upper = APInt(BitWidth, 0);
10243 // TODO: Return ConstantRange.
10244 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
10245 CR = ConstantRange::getNonEmpty(Lower, Upper);
10246 } else if (auto *II = dyn_cast<IntrinsicInst>(V))
10247 CR = getRangeForIntrinsic(*II, UseInstrInfo);
10248 else if (auto *SI = dyn_cast<SelectInst>(V)) {
10249 ConstantRange CRTrue = computeConstantRange(
10250 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10251 ConstantRange CRFalse = computeConstantRange(
10252 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
10253 CR = CRTrue.unionWith(CRFalse);
10254 CR = CR.intersectWith(getRangeForSelectPattern(*SI, IIQ));
10255 } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
10256 APInt Lower = APInt(BitWidth, 0);
10257 APInt Upper = APInt(BitWidth, 0);
10258 // TODO: Return ConstantRange.
10259 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
10260 CR = ConstantRange::getNonEmpty(Lower, Upper);
10261 } else if (const auto *A = dyn_cast<Argument>(V))
10262 if (std::optional<ConstantRange> Range = A->getRange())
10263 CR = *Range;
10264
10265 if (auto *I = dyn_cast<Instruction>(V)) {
10266 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
10267 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
10268
10269 if (const auto *CB = dyn_cast<CallBase>(V))
10270 if (std::optional<ConstantRange> Range = CB->getRange())
10271 CR = CR.intersectWith(*Range);
10272 }
10273
10274 if (CtxI && AC) {
10275 // Try to restrict the range based on information from assumptions.
10276 for (auto &AssumeVH : AC->assumptionsFor(V)) {
10277 if (!AssumeVH)
10278 continue;
10279 CallInst *I = cast<CallInst>(AssumeVH);
10280 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
10281 "Got assumption for the wrong function!");
10282 assert(I->getIntrinsicID() == Intrinsic::assume &&
10283 "must be an assume intrinsic");
10284
10285 if (!isValidAssumeForContext(I, CtxI, DT))
10286 continue;
10287 Value *Arg = I->getArgOperand(0);
10288 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
10289 // Currently we just use information from comparisons.
10290 if (!Cmp || Cmp->getOperand(0) != V)
10291 continue;
10292 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
10293 ConstantRange RHS =
10294 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
10295 UseInstrInfo, AC, I, DT, Depth + 1);
10296 CR = CR.intersectWith(
10297 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
10298 }
10299 }
10300
10301 return CR;
10302}
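// Editor's note: illustrative usage sketch, not part of the upstream file; the
// helper name is hypothetical. A client would typically query the range of an
// integer-typed value like this (the defaulted parameters are declared in
// llvm/Analysis/ValueTracking.h, and V must be of integer or integer-vector
// type).
static bool exampleFitsInByte(const Value *V, AssumptionCache &AC,
                              const Instruction *CtxI, const DominatorTree &DT) {
  ConstantRange CR = computeConstantRange(V, /*ForSigned=*/false,
                                          /*UseInstrInfo=*/true, &AC, CtxI, &DT);
  // True if every possible value of V fits in an unsigned 8-bit integer.
  return CR.getUnsignedMax().ult(256);
}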
10303
10304static void
10305 addValueAffectedByCondition(Value *V,
10306 function_ref<void(Value *)> InsertAffected) {
10307 assert(V != nullptr);
10308 if (isa<Argument>(V) || isa<GlobalValue>(V)) {
10309 InsertAffected(V);
10310 } else if (auto *I = dyn_cast<Instruction>(V)) {
10311 InsertAffected(V);
10312
10313 // Peek through unary operators to find the source of the condition.
10314 Value *Op;
10316 m_Trunc(m_Value(Op))))) {
10318 InsertAffected(Op);
10319 }
10320 }
10321}
10322
10324 Value *Cond, bool IsAssume, function_ref<void(Value *)> InsertAffected) {
10325 auto AddAffected = [&InsertAffected](Value *V) {
10326 addValueAffectedByCondition(V, InsertAffected);
10327 };
10328
10329 auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
10330 if (IsAssume) {
10331 AddAffected(LHS);
10332 AddAffected(RHS);
10333 } else if (match(RHS, m_Constant()))
10334 AddAffected(LHS);
10335 };
10336
10337 SmallVector<Value *, 8> Worklist;
10338 SmallPtrSet<Value *, 8> Visited;
10339 Worklist.push_back(Cond);
10340 while (!Worklist.empty()) {
10341 Value *V = Worklist.pop_back_val();
10342 if (!Visited.insert(V).second)
10343 continue;
10344
10345 CmpPredicate Pred;
10346 Value *A, *B, *X;
10347
10348 if (IsAssume) {
10349 AddAffected(V);
10350 if (match(V, m_Not(m_Value(X))))
10351 AddAffected(X);
10352 }
10353
10354 if (match(V, m_LogicalOp(m_Value(A), m_Value(B)))) {
10355 // assume(A && B) is split to -> assume(A); assume(B);
10356 // assume(!(A || B)) is split to -> assume(!A); assume(!B);
10357 // Finally, assume(A || B) / assume(!(A && B)) generally don't provide
10358 // enough information to be worth handling (intersection of information as
10359 // opposed to union).
10360 if (!IsAssume) {
10361 Worklist.push_back(A);
10362 Worklist.push_back(B);
10363 }
10364 } else if (match(V, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
10365 bool HasRHSC = match(B, m_ConstantInt());
10366 if (ICmpInst::isEquality(Pred)) {
10367 AddAffected(A);
10368 if (IsAssume)
10369 AddAffected(B);
10370 if (HasRHSC) {
10371 Value *Y;
10372 // (X << C) or (X >>_s C) or (X >>_u C).
10373 if (match(A, m_Shift(m_Value(X), m_ConstantInt())))
10374 AddAffected(X);
10375 // (X & C) or (X | C).
10376 else if (match(A, m_And(m_Value(X), m_Value(Y))) ||
10377 match(A, m_Or(m_Value(X), m_Value(Y)))) {
10378 AddAffected(X);
10379 AddAffected(Y);
10380 }
10381 // X - Y
10382 else if (match(A, m_Sub(m_Value(X), m_Value(Y)))) {
10383 AddAffected(X);
10384 AddAffected(Y);
10385 }
10386 }
10387 } else {
10388 AddCmpOperands(A, B);
10389 if (HasRHSC) {
10390 // Handle (A + C1) u< C2, which is the canonical form of
10391 // A > C3 && A < C4.
10392 if (match(A, m_AddLike(m_Value(X), m_ConstantInt())))
10393 AddAffected(X);
10394
10395 if (ICmpInst::isUnsigned(Pred)) {
10396 Value *Y;
10397 // X & Y u> C -> X >u C && Y >u C
10398 // X | Y u< C -> X u< C && Y u< C
10399 // X nuw+ Y u< C -> X u< C && Y u< C
10400 if (match(A, m_And(m_Value(X), m_Value(Y))) ||
10401 match(A, m_Or(m_Value(X), m_Value(Y))) ||
10402 match(A, m_NUWAdd(m_Value(X), m_Value(Y)))) {
10403 AddAffected(X);
10404 AddAffected(Y);
10405 }
10406 // X nuw- Y u> C -> X u> C
10407 if (match(A, m_NUWSub(m_Value(X), m_Value())))
10408 AddAffected(X);
10409 }
10410 }
10411
10412 // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported
10413 // by computeKnownFPClass().
10414 if (match(A, m_ElementWiseBitCast(m_Value(X)))) {
10415 if (Pred == ICmpInst::ICMP_SLT && match(B, m_Zero()))
10416 InsertAffected(X);
10417 else if (Pred == ICmpInst::ICMP_SGT && match(B, m_AllOnes()))
10418 InsertAffected(X);
10419 }
10420 }
10421
10422 if (HasRHSC && match(A, m_Intrinsic<Intrinsic::ctpop>(m_Value(X))))
10423 AddAffected(X);
10424 } else if (match(V, m_FCmp(Pred, m_Value(A), m_Value(B)))) {
10425 AddCmpOperands(A, B);
10426
10427 // fcmp fneg(x), y
10428 // fcmp fabs(x), y
10429 // fcmp fneg(fabs(x)), y
10430 if (match(A, m_FNeg(m_Value(A))))
10431 AddAffected(A);
10432 if (match(A, m_FAbs(m_Value(A))))
10433 AddAffected(A);
10434
10435 } else if (match(V, m_Intrinsic<Intrinsic::is_fpclass>(m_Value(A),
10436 m_Value()))) {
10437 // Handle patterns that computeKnownFPClass() support.
10438 AddAffected(A);
10439 } else if (!IsAssume && match(V, m_Trunc(m_Value(X)))) {
10440 // IsAssume is checked here because, for assumes, X has already been added
10441 // above via addValueAffectedByCondition.
10442 AddAffected(X);
10443 } else if (!IsAssume && match(V, m_Not(m_Value(X)))) {
10444 // Assume is checked here to avoid issues with ephemeral values
10445 Worklist.push_back(X);
10446 }
10447 }
10448}
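// Editor's note: illustrative usage sketch, not part of the upstream file; the
// helper name is hypothetical. For a branch condition such as
// 'icmp ult (add i32 %x, 5), 10' this collects both the compare operand and
// %x itself, since either may get a refined range along the taken edge.
static void exampleCollectAffected(Value *Cond,
                                   SmallVectorImpl<Value *> &Affected) {
  findValuesAffectedByCondition(Cond, /*IsAssume=*/false,
                                [&](Value *V) { Affected.push_back(V); });
}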
10449
10450 const Value *llvm::stripNullTest(const Value *V) {
10451 // (X >> C) or/add (X & mask(C) != 0)
10452 if (const auto *BO = dyn_cast<BinaryOperator>(V)) {
10453 if (BO->getOpcode() == Instruction::Add ||
10454 BO->getOpcode() == Instruction::Or) {
10455 const Value *X;
10456 const APInt *C1, *C2;
10457 if (match(BO, m_c_BinOp(m_LShr(m_Value(X), m_APInt(C1)),
10461 m_Zero())))) &&
10462 C2->popcount() == C1->getZExtValue())
10463 return X;
10464 }
10465 }
10466 return nullptr;
10467}
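// Editor's note (not part of the upstream file): one plausible IR instance of
// the "(X >> C) or/add (X & mask(C) != 0)" pattern stripped above, with C == 4
// and mask(C) == 15:
//   %hi   = lshr i8 %x, 4
//   %lo   = and i8 %x, 15
//   %nz   = icmp ne i8 %lo, 0
//   %nzx  = zext i1 %nz to i8
//   %test = or i8 %hi, %nzx
// %test is zero exactly when %x is zero, so a null test of %test can be
// rewritten as a null test of %x.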
10468
10469 Value *llvm::stripNullTest(Value *V) {
10470 return const_cast<Value *>(stripNullTest(const_cast<const Value *>(V)));
10471}
10472
10475 unsigned MaxCount, bool AllowUndefOrPoison) {
10478 auto Push = [&](const Value *V) -> bool {
10479 Constant *C;
10480 if (match(const_cast<Value *>(V), m_ImmConstant(C))) {
10481 if (!AllowUndefOrPoison && !isGuaranteedNotToBeUndefOrPoison(C))
10482 return false;
10483 // Check existence first to avoid unnecessary allocations.
10484 if (Constants.contains(C))
10485 return true;
10486 if (Constants.size() == MaxCount)
10487 return false;
10488 Constants.insert(C);
10489 return true;
10490 }
10491
10492 if (auto *Inst = dyn_cast<Instruction>(V)) {
10493 if (Visited.insert(Inst).second)
10494 Worklist.push_back(Inst);
10495 return true;
10496 }
10497 return false;
10498 };
10499 if (!Push(V))
10500 return false;
10501 while (!Worklist.empty()) {
10502 const Instruction *CurInst = Worklist.pop_back_val();
10503 switch (CurInst->getOpcode()) {
10504 case Instruction::Select:
10505 if (!Push(CurInst->getOperand(1)))
10506 return false;
10507 if (!Push(CurInst->getOperand(2)))
10508 return false;
10509 break;
10510 case Instruction::PHI:
10511 for (Value *IncomingValue : cast<PHINode>(CurInst)->incoming_values()) {
10512 // Fast path for recurrence PHI.
10513 if (IncomingValue == CurInst)
10514 continue;
10515 if (!Push(IncomingValue))
10516 return false;
10517 }
10518 break;
10519 default:
10520 return false;
10521 }
10522 }
10523 return true;
10524}
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
A range adaptor for a pair of iterators.
CallInst * Call
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define UINT64_MAX
Definition DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by A to NewBitWidth bits.
Definition APInt.cpp:3009
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition APInt.h:2264
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > > m_OrdOrUnordFMin(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point minimum function.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
match_combine_or< MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty >, MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > > m_OrdOrUnordFMax(const LHS &L, const RHS &R)
Match an 'ordered' or 'unordered' floating point maximum function.
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
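The matchers above compose into pattern trees consumed by match(). A minimal usage sketch (illustrative only; the helper name and the particular pattern are hypothetical, and the headers/using-directives from PatternMatch.h are assumed to be in scope):
// Returns true if V has the shape (X + Y) & C, where C is a ConstantInt or
// splatted constant vector and the add's operands may be in either order.
// X, Y and C are bound on a successful match.
static bool isMaskedAdd(llvm::Value *V, llvm::Value *&X, llvm::Value *&Y,
                        const llvm::APInt *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_And(m_c_Add(m_Value(X), m_Value(Y)), m_APInt(C)));
}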
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:667
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
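A hedged sketch of a typical query (the helper name is hypothetical; A, B and DL come from surrounding code, and the raw Value pointers convert implicitly to WithCache):
static bool addIsDisjointOr(const llvm::Value *A, const llvm::Value *B,
                            const llvm::DataLayout &DL) {
  // If A and B share no set bits, A + B computes the same value as A | B.
  llvm::SimplifyQuery SQ(DL);
  return llvm::haveNoCommonBitsSet(A, B, SQ);
}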
LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provably be executed on the path to OnPathTo if Root produced...
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, const Instruction *CtxI)
Returns true if no instruction between Assume and CtxI may free memory and the function is marked as...
@ Offset
Definition DWP.cpp:532
@ Length
Definition DWP.cpp:532
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
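A minimal sketch for a scalar query (hypothetical helper; a single demanded element is the scalar convention, and all classes are requested):
static bool neverNaNAndNeverNegative(const llvm::Value *V,
                                     const llvm::SimplifyQuery &SQ) {
  llvm::KnownFPClass KFC =
      llvm::computeKnownFPClass(V, llvm::APInt(1, 1), llvm::fcAllFlags, SQ);
  // Known to never be NaN and never ordered-less-than negative zero.
  return KFC.isKnownNeverNaN() && KFC.cannotBeOrderedLessThanZero();
}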
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns the call pointer argument that is considered the same by aliasing rules.
LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2530
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
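Illustrative sketch (hypothetical helper; V is assumed to point into a constant, NUL-terminated string such as a global string literal):
static bool isFormatStringLiteral(const llvm::Value *V) {
  llvm::StringRef Str;
  // On success Str references the constant bytes, trimmed at the first NUL.
  return llvm::getConstantStringInfo(V, Str) && Str.contains('%');
}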
LLVM_ABI bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater or equal than requested.
Definition Loads.cpp:229
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
LLVM_ABI Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
LLVM_ABI std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:303
LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
LLVM_ABI bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
Definition Loads.cpp:420
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:284
gep_type_iterator gep_type_end(const User *GEP)
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
Definition APFloat.h:1516
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:337
LLVM_ABI bool canIgnoreSignBitOfZero(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is zero.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, FPClassTest RHSClass, bool LookThroughSrc=true)
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
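Usage sketch (hypothetical helper; the optional assumption cache, context instruction and dominator tree are left at their defaults):
static bool fitsInByteUnsigned(const llvm::Value *V) {
  llvm::ConstantRange CR = llvm::computeConstantRange(V, /*ForSigned=*/false);
  // Conservative: the range covers every value V can take.
  return CR.getUnsignedMax().ult(256);
}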
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition bit.h:202
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
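Sketch of the typical call shape (hypothetical helper; PN is a phi that may be a simple induction variable):
static bool isSimpleRecurrencePhi(const llvm::PHINode *PN) {
  llvm::BinaryOperator *BO;
  llvm::Value *Start, *Step;
  // On success: PN = phi [Start, entry], [BO, latch], where BO combines PN
  // with Step (e.g. add/mul/shl).
  return llvm::matchSimpleRecurrence(PN, BO, Start, Step);
}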
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
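Worked sketch with concrete, illustrative values: for a source width of 4 and an interleaving mask, demanding all four outputs demands elements 0 and 1 of each operand:
static void shuffleDemandedExample() {
  llvm::SmallVector<int, 4> Mask = {0, 4, 1, 5};         // interleave low halves
  llvm::APInt DemandedElts = llvm::APInt::getAllOnes(4); // all outputs demanded
  llvm::APInt DemandedLHS, DemandedRHS;
  if (llvm::getShuffleDemandedElts(/*SrcWidth=*/4, Mask, DemandedElts,
                                   DemandedLHS, DemandedRHS)) {
    // DemandedLHS == DemandedRHS == 0b0011: only elements 0 and 1 of each
    // source operand feed the demanded outputs.
  }
}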
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
bool isGuard(const User *U)
Returns true iff U has the semantics of a guard expressed in the form of a call to llvm.experimental....
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_UNKNOWN
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
LLVM_ABI SelectPatternResult getSelectPattern(CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior=SPNB_NA, bool Ordered=false)
Determine the pattern for predicate X Pred Y ? X : Y.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
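Minimal sketch against the out-parameter overload above (hypothetical helper; V is an integer-typed value and the optional analyses are omitted):
static bool isMultipleOfFour(const llvm::Value *V, const llvm::DataLayout &DL) {
  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  llvm::computeKnownBits(V, Known, DL);
  // Two known-zero low bits imply divisibility by 4.
  return Known.countMinTrailingZeros() >= 2;
}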
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
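Usage sketch (hypothetical helper):
static bool isMinMaxIdiom(llvm::Value *V) {
  llvm::Value *LHS, *RHS;
  llvm::SelectPatternResult SPR = llvm::matchSelectPattern(V, LHS, RHS);
  // True for the signed/unsigned and floating-point min/max flavors.
  return llvm::SelectPatternResult::isMinOrMax(SPR.Flavor);
}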
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
LLVM_ABI void adjustKnownFPClassForSelectArm(KnownFPClass &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI bool collectPossibleValues(const Value *V, SmallPtrSetImpl< const Constant * > &Constants, unsigned MaxCount, bool AllowUndefOrPoison=true)
Enumerates all possible immediate values of V and inserts them into the set Constants.
LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y)
Return true iff: 1. X is poison implies Y is poison. 2. X is true implies Y is false. 3. X is false implies Y is true. Otherwise, return false.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
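Sketch (hypothetical helper; Den is a divisor operand and DL the module DataLayout):
static bool divisionCannotTrapOnZero(const llvm::Value *Den,
                                     const llvm::DataLayout &DL) {
  // A known-nonzero divisor rules out the divide-by-zero trap.
  return llvm::isKnownNonZero(Den, llvm::SimplifyQuery(DL));
}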
constexpr int PoisonMaskElem
LLVM_ABI RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, AssumptionCache &AC, const Instruction *CtxI, const DominatorTree *DT=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ Other
Any other memory.
Definition ModRef.h:68
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF)
Convert given SPF to equivalent min/max intrinsic.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ Add
Sum of integers.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or it has been determined that no NaNs can occur).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
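Sketch (hypothetical helper): a value round-trips through trunc-to-i16 plus sext iff at least BitWidth - 15 of its bits are copies of the sign bit.
static bool fitsInSignedI16(const llvm::Value *V, const llvm::DataLayout &DL) {
  unsigned BW = V->getType()->getScalarSizeInBits();
  return BW <= 16 || llvm::ComputeNumSignBits(V, DL) >= BW - 15;
}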
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point value can never contain a NaN or infinity.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
gep_type_iterator gep_type_begin(const User *GEP)
LLVM_ABI Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_ABI Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a...
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
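Sketch (hypothetical helper; Ptr is any pointer-typed value, and the AllocaInst definition from llvm/IR/Instructions.h is assumed to be available):
static bool isDerivedFromAlloca(const llvm::Value *Ptr) {
  // Strips GEPs, casts and similar adjustments back to the base object.
  return llvm::isa<llvm::AllocaInst>(llvm::getUnderlyingObject(Ptr));
}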
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is either NaN or never less than -0....
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instruction I depend on values not reachable through...
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
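Sketch (hypothetical helper; CondA and CondB are i1 conditions): std::nullopt means the implication is unknown, so only a definite true is acted on.
static bool condAImpliesCondB(const llvm::Value *CondA,
                              const llvm::Value *CondB,
                              const llvm::DataLayout &DL) {
  std::optional<bool> Implied =
      llvm::isImpliedCondition(CondA, CondB, DL, /*LHSIsTrue=*/true);
  return Implied.value_or(false);
}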
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
LLVM_ABI bool canIgnoreSignBitOfNaN(const Use &U)
Return true if the sign bit of the FP value can be ignored by the user when the value is NaN.
LLVM_ABI void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
SmallPtrSet< Value *, 4 > AffectedValues
Represents offset+length into a ConstantDataArray.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ IEEE
IEEE-754 denormal numbers preserved.
static constexpr DenormalMode getDynamic()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedZeros(const InstT *Op) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition KnownBits.h:301
static LLVM_ABI KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
static LLVM_ABI std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
Definition KnownBits.h:186
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
Definition KnownBits.h:255
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
LLVM_ABI KnownBits blsi() const
Compute known bits for X & -X, which has only the lowest bit set of X set.
void makeNonNegative()
Make this value non-negative.
Definition KnownBits.h:124
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:251
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:242
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
Definition KnownBits.h:66
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:274
LLVM_ABI KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
void makeNegative()
Make this value negative.
Definition KnownBits.h:119
void setAllConflict()
Make all bits known to be both zero and one.
Definition KnownBits.h:99
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
Definition KnownBits.h:161
KnownBits byteSwap() const
Definition KnownBits.h:514
bool hasConflict() const
Returns true if there is conflicting information.
Definition KnownBits.h:51
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:289
void setAllZero()
Make all bits known to be zero and discard any previous information.
Definition KnownBits.h:86
KnownBits reverseBits() const
Definition KnownBits.h:518
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
Definition KnownBits.h:172
bool isConstant() const
Returns true if we know the value of all bits.
Definition KnownBits.h:54
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
Definition KnownBits.h:321
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
Definition KnownBits.h:111
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
Definition KnownBits.h:225
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition KnownBits.h:311
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
Definition KnownBits.h:180
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
Definition KnownBits.h:245
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
Definition KnownBits.h:347
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
Definition KnownBits.h:196
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:145
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:129
static LLVM_ABI KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
Definition KnownBits.cpp:60
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
Definition KnownBits.h:326
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
Definition KnownBits.h:353
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
Definition KnownBits.h:280
void setAllOnes()
Make all bits known to be one and discard any previous information.
Definition KnownBits.h:92
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:219
static LLVM_ABI KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
Definition KnownBits.h:167
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
static LLVM_ABI std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static LLVM_ABI std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
Definition KnownBits.h:206
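A small, self-contained sketch composing the KnownBits helpers above (illustrative values only): two 8-bit operands whose high nibbles are known zero sum to at most 30, so the top three bits of the result are known zero.
static llvm::KnownBits knownBitsOfNibbleSum() {
  llvm::KnownBits LHS(8), RHS(8);   // all bits initially unknown
  LHS.Zero.setHighBits(4);          // high nibble of LHS known zero
  RHS.Zero.setHighBits(4);          // high nibble of RHS known zero
  // The sum is at most 30, so at least bits 5..7 are known zero.
  return llvm::KnownBits::add(LHS, RHS);
}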
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
static LLVM_ABI KnownFPClass fmul(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fmul.
void copysign(const KnownFPClass &Sign)
static KnownFPClass square(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
static LLVM_ABI KnownFPClass canonicalize(const KnownFPClass &Src, DenormalMode DenormMode=DenormalMode::getDynamic())
Apply the canonicalize intrinsic to this value.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
static LLVM_ABI KnownFPClass log(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for log/log2/log10.
KnownFPClass intersectWith(const KnownFPClass &RHS)
static LLVM_ABI KnownFPClass minMaxLike(const KnownFPClass &LHS, const KnownFPClass &RHS, MinMaxKind Kind, DenormalMode DenormMode=DenormalMode::getDynamic())
bool isUnknown() const
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
static LLVM_ABI KnownFPClass exp(const KnownFPClass &Src)
Report known values for exp, exp2 and exp10.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
LLVM_ABI void propagateCanonicalizingSrc(const KnownFPClass &Src, DenormalMode Mode)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void signBitMustBeZero()
Assume the sign bit is zero.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
Represent one information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is one of the min/max select patterns (i.e. not SPF_UNKNOWN, SPF_ABS, or SPF_NABS).
const DataLayout & DL
SimplifyQuery getWithoutCondContext() const
const Instruction * CxtI
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
AssumptionCache * AC
const DomConditionCache * DC
const InstrInfoQuery IIQ
const CondContext * CC