//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
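/// For example, i8 and <4 x i8> both report a bitwidth of 8, while a pointer
/// reports the DataLayout pointer size for its address space (commonly 64).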
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If either value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

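// For example, demanding only element 1 of
//   %s = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// demands only element 1 of %b (mask value 5 selects the second element of
// the second operand), so the helper below sets bit 1 of DemandedRHS and
// leaves DemandedLHS zero.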
static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
  if (DemandedElts.isNullValue())
    return true;
  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a scalable
  // vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
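  // For example, (X & ~M) and (Y & M) can never both have the same bit set,
  // which is what lets (X & ~M) + (Y & M) be rewritten as a disjoint or.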
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}

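// For example, a value whose only use is "icmp eq i32 %v, 0" satisfies the
// check below, while any arithmetic or non-equality use makes it fail.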
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a scalable
  // vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  Known = KnownBits::mul(Known, Known2);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
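    // For example, for an i8 range [16, 32), min = 0b00010000 and
    // max = 0b00011111 agree in their top four bits, so those four bits
    // (0001) become known for every value in the range.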
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  // 1. The assume must dominate the context (or the control flow must
  //    reach the assume whenever it reaches the context).
  // 2. The context must not be in the assume's set of ephemeral values
  //    (otherwise we will use the assume to prove that the condition
  //    feeding the assume is trivially true, thus causing the removal of
  //    the assume).
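  //
  // For example, an assume in a block that dominates the context trivially
  // satisfies restriction 1, while an assume that appears after the context
  // in the same block is only usable if every instruction in between is
  // guaranteed to transfer execution to its successor.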

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    unsigned ScanLimit = 15;
    for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
        return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
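  // For example, "icmp ult %v, 8" yields the range [0, 8), which contains 0
  // and so cannot exclude it, while "icmp slt %v, 0" yields [INT_MIN, 0),
  // which does exclude 0.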
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

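  // For example, an earlier "call void @llvm.assume(i1 %c)" guarded by
  // "%c = icmp ne i32 %v, 0" lets this query succeed for %v, provided the
  // assume is valid in the context of Q.CxtI.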
  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We run it once for each value queried, resulting in a runtime of
    // ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      Known.Zero.setLowBits(Log2_32(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We run it once for each value queried, resulting in a runtime of
    // ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // We are attempting to compute known bits for the operands of an assume.
    // Do not try to use other assumptions for those recursive calls because
    // that can lead to mutual recursion and a compile-time explosion.
    // An example of the mutual recursion: computeKnownBits can call
    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
    // and so on.
    Query QueryNoAC = Q;
    QueryNoAC.AC = nullptr;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
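        // For example, assume((%v & 12) == 4) makes bit 2 of %v known one and
        // bit 3 known zero; bits outside the mask remain unknown.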
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a power
        // of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and, on return,
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits of the shift source
/// and a shift amount, computes the implied known bits of the shift operator's
/// result for that shift amount. The results from calling KF are conservatively
/// combined for all permitted shift amounts.
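/// For example, if the shift amount of an lshr is known to be either 0 or 2
/// (bit 0 known zero, all higher bits known zero), the final result is the
/// intersection of KF applied at shift amount 0 and at shift amount 2.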
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
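    // For example, in "and i8 %x, (add i8 %x, 7)" the operands always differ
    // in bit 0 because 7 is odd, so bit 0 of the result is known zero.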
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy())
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
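      //
      // For example, for a <4 x i16> -> <2 x i32> bitcast on a little-endian
      // target, output element 0 is assembled from input element 0 (bits
      // 0..15) and input element 1 (bits 16..31), each queried with its own
      // single-bit SubDemandedElts mask.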
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getNullValue(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countTrailingZeros());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countLeadingZeros());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle the case when the index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
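      // For example, indexing an array of i32 scales the index by 4, so the
      // index contribution to the address always has its low two bits zero.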
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        //   %iv = [R, %entry], [%iv.next, %backedge]
        //   %iv.next = shift_op %iv, L
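        //
        // For example, if R is 16 (0b10000) and the shift is lshr, the 27
        // leading zero bits of the i32 start value can never be repopulated,
        // so they are known zero in %iv on every iteration.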

        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context. If warranted, explore and
        // add sufficient tests to cover.
        Query RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
          break;
        case Instruction::LShr:
          // A lshr recurrence will preserve the leading zeros of the
          // start value.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          break;
        case Instruction::AShr:
          // An ashr recurrence will extend the initial sign bit.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          Known.One.setHighBits(Known2.countMinLeadingOnes());
          break;
        }
      }

      // Check for operations that have the property that if
      // both their operands have low zero bits, the result
      // will have low zero bits.
      if (Opcode == Instruction::Add ||
          Opcode == Instruction::Sub ||
          Opcode == Instruction::And ||
          Opcode == Instruction::Or ||
          Opcode == Instruction::Mul) {
        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;

        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();

        // Ok, we have a PHI of the form L op= R. Check for low
        // zero bits.
        RecQ.CxtI = RInst;
        computeKnownBits(R, Known2, Depth + 1, RecQ);

        // We need to take the minimum number of known bits.
        KnownBits Known3(BitWidth);
        RecQ.CxtI = LInst;
        computeKnownBits(L, Known3, Depth + 1, RecQ);

        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                       Known3.countMinTrailingZeros()));

        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
          // If the initial value of the recurrence is nonnegative, and we are
          // adding a nonnegative number with nsw, the result can only be
          // nonnegative or a poison value regardless of the number of times we
          // execute the add in the phi recurrence. If the initial value is
          // negative and we are adding a negative number with nsw, the result
          // can only be negative or a poison value. Similar arguments apply to
          // sub and mul.
          //
          // (add non-negative, non-negative) --> non-negative
          // (add negative, negative) --> negative
          if (Opcode == Instruction::Add) {
            if (Known2.isNonNegative() && Known3.isNonNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNegative())
              Known.makeNegative();
          }

          // (sub nsw non-negative, negative) --> non-negative
          // (sub nsw negative, non-negative) --> negative
          else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
            if (Known2.isNonNegative() && Known3.isNegative())
              Known.makeNonNegative();
            else if (Known2.isNegative() && Known3.isNonNegative())
              Known.makeNegative();
          }

          // (mul nsw non-negative, non-negative) --> non-negative
          else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                   Known3.isNonNegative())
            Known.makeNonNegative();
        }

        break;
      }
1505  }
1506 
1507  // Unreachable blocks may have zero-operand PHI nodes.
1508  if (P->getNumIncomingValues() == 0)
1509  break;
1510 
1511  // Otherwise take the unions of the known bit sets of the operands,
1512  // taking conservative care to avoid excessive recursion.
1513  if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1514  // Skip if every incoming value refers back to the PHI itself.
1515  if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1516  break;
1517 
1518  Known.Zero.setAllBits();
1519  Known.One.setAllBits();
1520  for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1521  Value *IncValue = P->getIncomingValue(u);
1522  // Skip direct self references.
1523  if (IncValue == P) continue;
1524 
1525  // Change the context instruction to the "edge" that flows into the
1526  // phi. This is important because that is where the value is actually
1527  // "evaluated" even though it is used later somewhere else. (see also
1528  // D69571).
1529  Query RecQ = Q;
1530  RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1531 
1532  Known2 = KnownBits(BitWidth);
1533  // Recurse, but cap the recursion to one level, because we don't
1534  // want to waste time spinning around in loops.
1535  computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1536  Known = KnownBits::commonBits(Known, Known2);
1537  // If all bits have been ruled out, there's no need to check
1538  // more operands.
1539  if (Known.isUnknown())
1540  break;
1541  }
1542  }
1543  break;
1544  }
1545  case Instruction::Call:
1546  case Instruction::Invoke:
1547  // If range metadata is attached to this call, set known bits from that,
1548  // and then intersect with known bits based on other properties of the
1549  // function.
1550  if (MDNode *MD =
1551  Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1552  computeKnownBitsFromRangeMetadata(*MD, Known);
1553  if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1554  computeKnownBits(RV, Known2, Depth + 1, Q);
1555  Known.Zero |= Known2.Zero;
1556  Known.One |= Known2.One;
1557  }
1558  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1559  switch (II->getIntrinsicID()) {
1560  default: break;
1561  case Intrinsic::abs: {
1562  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1563  bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1564  Known = Known2.abs(IntMinIsPoison);
1565  break;
1566  }
1567  case Intrinsic::bitreverse:
1568  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1569  Known.Zero |= Known2.Zero.reverseBits();
1570  Known.One |= Known2.One.reverseBits();
1571  break;
1572  case Intrinsic::bswap:
1573  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1574  Known.Zero |= Known2.Zero.byteSwap();
1575  Known.One |= Known2.One.byteSwap();
1576  break;
1577  case Intrinsic::ctlz: {
1578  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1579  // If we have a known 1, its position is our upper bound.
1580  unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1581  // If this call is undefined for 0, the result will be less than 2^n.
1582  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1583  PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1584  unsigned LowBits = Log2_32(PossibleLZ)+1;
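      // e.g. if at most 5 leading zeros are possible, the count fits in
      // Log2_32(5) + 1 == 3 bits, so every bit from bit 3 upwards is zero.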
1585  Known.Zero.setBitsFrom(LowBits);
1586  break;
1587  }
1588  case Intrinsic::cttz: {
1589  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1590  // If we have a known 1, its position is our upper bound.
1591  unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1592  // If this call is undefined for 0, the result will be less than 2^n.
1593  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1594  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1595  unsigned LowBits = Log2_32(PossibleTZ)+1;
1596  Known.Zero.setBitsFrom(LowBits);
1597  break;
1598  }
1599  case Intrinsic::ctpop: {
1600  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1601  // We can bound the space the count needs. Also, bits known to be zero
1602  // can't contribute to the population.
1603  unsigned BitsPossiblySet = Known2.countMaxPopulation();
1604  unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1605  Known.Zero.setBitsFrom(LowBits);
1606  // TODO: we could bound KnownOne using the lower bound on the number
1607  // of bits which might be set provided by popcnt KnownOne2.
1608  break;
1609  }
1610  case Intrinsic::fshr:
1611  case Intrinsic::fshl: {
1612  const APInt *SA;
1613  if (!match(I->getOperand(2), m_APInt(SA)))
1614  break;
1615 
1616  // Normalize to funnel shift left.
1617  uint64_t ShiftAmt = SA->urem(BitWidth);
1618  if (II->getIntrinsicID() == Intrinsic::fshr)
1619  ShiftAmt = BitWidth - ShiftAmt;
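      // e.g. on i8, fshr(a, b, 3) == fshl(a, b, 5): both produce
      // (a << 5) | (b >> 3) once the amount is reduced modulo the bit width.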
1620 
1621  KnownBits Known3(BitWidth);
1622  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1623  computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1624 
1625  Known.Zero =
1626  Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1627  Known.One =
1628  Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1629  break;
1630  }
1631  case Intrinsic::uadd_sat:
1632  case Intrinsic::usub_sat: {
1633  bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1634  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1635  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1636 
1637  // Add: Leading ones of either operand are preserved.
1638  // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1639  // as leading zeros in the result.
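      // e.g. if one operand of uadd.sat starts with the bits 11..., the
      // result also starts with 11..., whether or not the addition saturates
      // to all-ones.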
1640  unsigned LeadingKnown;
1641  if (IsAdd)
1642  LeadingKnown = std::max(Known.countMinLeadingOnes(),
1643  Known2.countMinLeadingOnes());
1644  else
1645  LeadingKnown = std::max(Known.countMinLeadingZeros(),
1646  Known2.countMinLeadingOnes());
1647 
1648  Known = KnownBits::computeForAddSub(
1649  IsAdd, /* NSW */ false, Known, Known2);
1650 
1651  // We select between the operation result and all-ones/zero
1652  // respectively, so we can preserve known ones/zeros.
1653  if (IsAdd) {
1654  Known.One.setHighBits(LeadingKnown);
1655  Known.Zero.clearAllBits();
1656  } else {
1657  Known.Zero.setHighBits(LeadingKnown);
1658  Known.One.clearAllBits();
1659  }
1660  break;
1661  }
1662  case Intrinsic::umin:
1663  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1664  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1665  Known = KnownBits::umin(Known, Known2);
1666  break;
1667  case Intrinsic::umax:
1668  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1669  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1670  Known = KnownBits::umax(Known, Known2);
1671  break;
1672  case Intrinsic::smin:
1673  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1674  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1675  Known = KnownBits::smin(Known, Known2);
1676  break;
1677  case Intrinsic::smax:
1678  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1679  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1680  Known = KnownBits::smax(Known, Known2);
1681  break;
1682  case Intrinsic::x86_sse42_crc32_64_64:
1683  Known.Zero.setBitsFrom(32);
1684  break;
1685  case Intrinsic::riscv_vsetvli:
1686  case Intrinsic::riscv_vsetvlimax:
1687  // Assume that VL output is positive and would fit in an int32_t.
1688  // TODO: VLEN might be capped at 16 bits in a future V spec update.
1689  if (BitWidth >= 32)
1690  Known.Zero.setBitsFrom(31);
1691  break;
1692  }
1693  }
1694  break;
1695  case Instruction::ShuffleVector: {
1696  auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1697  // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1698  if (!Shuf) {
1699  Known.resetAll();
1700  return;
1701  }
1702  // For undef elements, we don't know anything about the common state of
1703  // the shuffle result.
1704  APInt DemandedLHS, DemandedRHS;
1705  if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1706  Known.resetAll();
1707  return;
1708  }
1709  Known.One.setAllBits();
1710  Known.Zero.setAllBits();
1711  if (!!DemandedLHS) {
1712  const Value *LHS = Shuf->getOperand(0);
1713  computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1714  // If we don't know any bits, early out.
1715  if (Known.isUnknown())
1716  break;
1717  }
1718  if (!!DemandedRHS) {
1719  const Value *RHS = Shuf->getOperand(1);
1720  computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1721  Known = KnownBits::commonBits(Known, Known2);
1722  }
1723  break;
1724  }
1725  case Instruction::InsertElement: {
1726  const Value *Vec = I->getOperand(0);
1727  const Value *Elt = I->getOperand(1);
1728  auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1729  // Early out if the index is non-constant or out-of-range.
1730  unsigned NumElts = DemandedElts.getBitWidth();
1731  if (!CIdx || CIdx->getValue().uge(NumElts)) {
1732  Known.resetAll();
1733  return;
1734  }
1735  Known.One.setAllBits();
1736  Known.Zero.setAllBits();
1737  unsigned EltIdx = CIdx->getZExtValue();
1738  // Do we demand the inserted element?
1739  if (DemandedElts[EltIdx]) {
1740  computeKnownBits(Elt, Known, Depth + 1, Q);
1741  // If we don't know any bits, early out.
1742  if (Known.isUnknown())
1743  break;
1744  }
1745  // We don't need the base vector element that has been inserted.
1746  APInt DemandedVecElts = DemandedElts;
1747  DemandedVecElts.clearBit(EltIdx);
1748  if (!!DemandedVecElts) {
1749  computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1750  Known = KnownBits::commonBits(Known, Known2);
1751  }
1752  break;
1753  }
1754  case Instruction::ExtractElement: {
1755  // Look through extract element. If the index is non-constant or
1756  // out-of-range demand all elements, otherwise just the extracted element.
1757  const Value *Vec = I->getOperand(0);
1758  const Value *Idx = I->getOperand(1);
1759  auto *CIdx = dyn_cast<ConstantInt>(Idx);
1760  if (isa<ScalableVectorType>(Vec->getType())) {
1761  // FIXME: there's probably *something* we can do with scalable vectors
1762  Known.resetAll();
1763  break;
1764  }
1765  unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1766  APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1767  if (CIdx && CIdx->getValue().ult(NumElts))
1768  DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1769  computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1770  break;
1771  }
1772  case Instruction::ExtractValue:
1773  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1774  const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1775  if (EVI->getNumIndices() != 1) break;
1776  if (EVI->getIndices()[0] == 0) {
1777  switch (II->getIntrinsicID()) {
1778  default: break;
1779  case Intrinsic::uadd_with_overflow:
1780  case Intrinsic::sadd_with_overflow:
1781  computeKnownBitsAddSub(true, II->getArgOperand(0),
1782  II->getArgOperand(1), false, DemandedElts,
1783  Known, Known2, Depth, Q);
1784  break;
1785  case Intrinsic::usub_with_overflow:
1786  case Intrinsic::ssub_with_overflow:
1787  computeKnownBitsAddSub(false, II->getArgOperand(0),
1788  II->getArgOperand(1), false, DemandedElts,
1789  Known, Known2, Depth, Q);
1790  break;
1791  case Intrinsic::umul_with_overflow:
1792  case Intrinsic::smul_with_overflow:
1793  computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1794  DemandedElts, Known, Known2, Depth, Q);
1795  break;
1796  }
1797  }
1798  }
1799  break;
1800  case Instruction::Freeze:
1801  if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1802  Depth + 1))
1803  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1804  break;
1805  }
1806 }
1807 
1808 /// Determine which bits of V are known to be either zero or one and return
1809 /// them.
1810 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1811  unsigned Depth, const Query &Q) {
1812  KnownBits Known(getBitWidth(V->getType(), Q.DL));
1813  computeKnownBits(V, DemandedElts, Known, Depth, Q);
1814  return Known;
1815 }
1816 
1817 /// Determine which bits of V are known to be either zero or one and return
1818 /// them.
1819 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1820  KnownBits Known(getBitWidth(V->getType(), Q.DL));
1821  computeKnownBits(V, Known, Depth, Q);
1822  return Known;
1823 }
1824 
1825 /// Determine which bits of V are known to be either zero or one and return
1826 /// them in the Known bit set.
1827 ///
1828 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1829 /// we cannot optimize based on the assumption that it is zero without changing
1830 /// it to be an explicit zero. If we don't change it to zero, other code could
1831  /// be optimized based on the contradictory assumption that it is non-zero.
1832 /// Because instcombine aggressively folds operations with undef args anyway,
1833 /// this won't lose us code quality.
1834 ///
1835 /// This function is defined on values with integer type, values with pointer
1836  /// type, and vectors of integers. In the case where V is a vector, the
1837  /// known zero and known one values are the same width as the vector
1838  /// element, and a bit is set only if it is true
1839 /// for all of the demanded elements in the vector specified by DemandedElts.
1840 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1841  KnownBits &Known, unsigned Depth, const Query &Q) {
1842  if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1843  // No demanded elts or V is a scalable vector, better to assume we don't
1844  // know anything.
1845  Known.resetAll();
1846  return;
1847  }
1848 
1849  assert(V && "No Value?");
1850  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1851 
1852 #ifndef NDEBUG
1853  Type *Ty = V->getType();
1854  unsigned BitWidth = Known.getBitWidth();
1855 
1857  "Not integer or pointer type!");
1858 
1859  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1860  assert(
1861  FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1862  "DemandedElt width should equal the fixed vector number of elements");
1863  } else {
1864  assert(DemandedElts == APInt(1, 1) &&
1865  "DemandedElt width should be 1 for scalars");
1866  }
1867 
1868  Type *ScalarTy = Ty->getScalarType();
1869  if (ScalarTy->isPointerTy()) {
1870  assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1871  "V and Known should have same BitWidth");
1872  } else {
1873  assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1874  "V and Known should have same BitWidth");
1875  }
1876 #endif
1877 
1878  const APInt *C;
1879  if (match(V, m_APInt(C))) {
1880  // We know all of the bits for a scalar constant or a splat vector constant!
1881  Known = KnownBits::makeConstant(*C);
1882  return;
1883  }
1884  // Null and aggregate-zero are all-zeros.
1885  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1886  Known.setAllZero();
1887  return;
1888  }
1889  // Handle a constant vector by taking the intersection of the known bits of
1890  // each element.
1891  if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1892  // We know that CDV must be a vector of integers. Take the intersection of
1893  // each element.
1894  Known.Zero.setAllBits(); Known.One.setAllBits();
1895  for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1896  if (!DemandedElts[i])
1897  continue;
1898  APInt Elt = CDV->getElementAsAPInt(i);
1899  Known.Zero &= ~Elt;
1900  Known.One &= Elt;
1901  }
1902  return;
1903  }
1904 
1905  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1906  // We know that CV must be a vector of integers. Take the intersection of
1907  // each element.
1908  Known.Zero.setAllBits(); Known.One.setAllBits();
1909  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1910  if (!DemandedElts[i])
1911  continue;
1912  Constant *Element = CV->getAggregateElement(i);
1913  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1914  if (!ElementCI) {
1915  Known.resetAll();
1916  return;
1917  }
1918  const APInt &Elt = ElementCI->getValue();
1919  Known.Zero &= ~Elt;
1920  Known.One &= Elt;
1921  }
1922  return;
1923  }
1924 
1925  // Start out not knowing anything.
1926  Known.resetAll();
1927 
1928  // We can't imply anything about undefs.
1929  if (isa<UndefValue>(V))
1930  return;
1931 
1932  // There's no point in looking through other users of ConstantData for
1933  // assumptions. Confirm that we've handled them all.
1934  assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1935 
1936  // All recursive calls that increase depth must come after this.
1937  if (Depth == MaxAnalysisRecursionDepth)
1938  return;
1939 
1940  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1941  // the bits of its aliasee.
1942  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1943  if (!GA->isInterposable())
1944  computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1945  return;
1946  }
1947 
1948  if (const Operator *I = dyn_cast<Operator>(V))
1949  computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1950 
1951  // Aligned pointers have trailing zeros - refine Known.Zero set
1952  if (isa<PointerType>(V->getType())) {
1953  Align Alignment = V->getPointerAlignment(Q.DL);
1954  Known.Zero.setLowBits(Log2(Alignment));
1955  }
1956 
1957  // computeKnownBitsFromAssume strictly refines Known.
1958  // Therefore, we run them after computeKnownBitsFromOperator.
1959 
1960  // Check whether a nearby assume intrinsic can determine some known bits.
1961  computeKnownBitsFromAssume(V, Known, Depth, Q);
1962 
1963  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1964 }
1965 
1966 /// Return true if the given value is known to have exactly one
1967 /// bit set when defined. For vectors return true if every element is known to
1968 /// be a power of two when defined. Supports values with integer or pointer
1969 /// types and vectors of integers.
1970 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1971  const Query &Q) {
1972  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1973 
1974  // Attempt to match against constants.
1975  if (OrZero && match(V, m_Power2OrZero()))
1976  return true;
1977  if (match(V, m_Power2()))
1978  return true;
1979 
1980  // 1 << X is clearly a power of two if the one is not shifted off the end. If
1981  // it is shifted off the end then the result is undefined.
1982  if (match(V, m_Shl(m_One(), m_Value())))
1983  return true;
1984 
1985  // (signmask) >>l X is clearly a power of two if the one is not shifted off
1986  // the bottom. If it is shifted off the bottom then the result is undefined.
1987  if (match(V, m_LShr(m_SignMask(), m_Value())))
1988  return true;
1989 
1990  // The remaining tests are all recursive, so bail out if we hit the limit.
1991  if (Depth++ == MaxAnalysisRecursionDepth)
1992  return false;
1993 
1994  Value *X = nullptr, *Y = nullptr;
1995  // A shift left or a logical shift right of a power of two is a power of two
1996  // or zero.
1997  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1998  match(V, m_LShr(m_Value(X), m_Value()))))
1999  return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2000 
2001  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2002  return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2003 
2004  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2005  return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2006  isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2007 
2008  // Peek through min/max.
2009  if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2010  return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2011  isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2012  }
2013 
2014  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2015  // A power of two and'd with anything is a power of two or zero.
2016  if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2017  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2018  return true;
2019  // X & (-X) is always a power of two or zero.
2020  if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2021  return true;
2022  return false;
2023  }
2024 
2025  // Adding a power-of-two or zero to the same power-of-two or zero yields
2026  // either the original power-of-two, a larger power-of-two or zero.
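      // e.g. if Y is a power of two and X == Y & M, then X is either Y or 0,
      // so X + Y is either 2*Y or Y; the nuw/nsw/OrZero requirement below
      // rules out the case where 2*Y wraps around to zero.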
2027  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2028  const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2029  if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2030  Q.IIQ.hasNoSignedWrap(VOBO)) {
2031  if (match(X, m_And(m_Specific(Y), m_Value())) ||
2032  match(X, m_And(m_Value(), m_Specific(Y))))
2033  if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2034  return true;
2035  if (match(Y, m_And(m_Specific(X), m_Value())) ||
2036  match(Y, m_And(m_Value(), m_Specific(X))))
2037  if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2038  return true;
2039 
2040  unsigned BitWidth = V->getType()->getScalarSizeInBits();
2041  KnownBits LHSBits(BitWidth);
2042  computeKnownBits(X, LHSBits, Depth, Q);
2043 
2044  KnownBits RHSBits(BitWidth);
2045  computeKnownBits(Y, RHSBits, Depth, Q);
2046  // If i8 V is a power of two or zero:
2047  // ZeroBits: 1 1 1 0 1 1 1 1
2048  // ~ZeroBits: 0 0 0 1 0 0 0 0
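      // i.e. at most one bit position may be set across both operands, so X
      // and Y are each 0 or 2^k, making X + Y one of 0, 2^k, or 2^(k+1).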
2049  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2050  // If OrZero isn't set, we cannot give back a zero result.
2051  // Make sure either the LHS or RHS has a bit set.
2052  if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2053  return true;
2054  }
2055  }
2056 
2057  // An exact divide or right shift can only shift off zero bits, so the result
2058  // is a power of two only if the first operand is a power of two and not
2059  // copying a sign bit (sdiv int_min, 2).
2060  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2061  match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2062  return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2063  Depth, Q);
2064  }
2065 
2066  return false;
2067 }
2068 
2069 /// Test whether a GEP's result is known to be non-null.
2070 ///
2071 /// Uses properties inherent in a GEP to try to determine whether it is known
2072 /// to be non-null.
2073 ///
2074 /// Currently this routine does not support vector GEPs.
2075 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2076  const Query &Q) {
2077  const Function *F = nullptr;
2078  if (const Instruction *I = dyn_cast<Instruction>(GEP))
2079  F = I->getFunction();
2080 
2081  if (!GEP->isInBounds() ||
2082  NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2083  return false;
2084 
2085  // FIXME: Support vector-GEPs.
2086  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2087 
2088  // If the base pointer is non-null, we cannot walk to a null address with an
2089  // inbounds GEP in address space zero.
2090  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2091  return true;
2092 
2093  // Walk the GEP operands and see if any operand introduces a non-zero offset.
2094  // If so, then the GEP cannot produce a null pointer, as doing so would
2095  // inherently violate the inbounds contract within address space zero.
2096  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2097  GTI != GTE; ++GTI) {
2098  // Struct types are easy -- they must always be indexed by a constant.
2099  if (StructType *STy = GTI.getStructTypeOrNull()) {
2100  ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2101  unsigned ElementIdx = OpC->getZExtValue();
2102  const StructLayout *SL = Q.DL.getStructLayout(STy);
2103  uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2104  if (ElementOffset > 0)
2105  return true;
2106  continue;
2107  }
2108 
2109  // If we have a zero-sized type, the index doesn't matter. Keep looping.
2110  if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2111  continue;
2112 
2113  // Fast path the constant operand case both for efficiency and so we don't
2114  // increment Depth when just zipping down an all-constant GEP.
2115  if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2116  if (!OpC->isZero())
2117  return true;
2118  continue;
2119  }
2120 
2121  // We post-increment Depth here because while isKnownNonZero increments it
2122  // as well, when we pop back up that increment won't persist. We don't want
2123  // to recurse 10k times just because we have 10k GEP operands. We don't
2124  // bail completely out because we want to handle constant GEPs regardless
2125  // of depth.
2126  if (Depth++ >= MaxAnalysisRecursionDepth)
2127  continue;
2128 
2129  if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2130  return true;
2131  }
2132 
2133  return false;
2134 }
2135 
2136 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2137  const Instruction *CtxI,
2138  const DominatorTree *DT) {
2139  if (isa<Constant>(V))
2140  return false;
2141 
2142  if (!CtxI || !DT)
2143  return false;
2144 
2145  unsigned NumUsesExplored = 0;
2146  for (auto *U : V->users()) {
2147  // Avoid massive lists
2148  if (NumUsesExplored >= DomConditionsMaxUses)
2149  break;
2150  NumUsesExplored++;
2151 
2152  // If the value is used as an argument to a call or invoke, then argument
2153  // attributes may provide an answer about null-ness.
2154  if (const auto *CB = dyn_cast<CallBase>(U))
2155  if (auto *CalledFunc = CB->getCalledFunction())
2156  for (const Argument &Arg : CalledFunc->args())
2157  if (CB->getArgOperand(Arg.getArgNo()) == V &&
2158  Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2159  DT->dominates(CB, CtxI))
2160  return true;
2161 
2162  // If the value is used as a load/store, then the pointer must be non null.
2163  if (V == getLoadStorePointerOperand(U)) {
2164  const Instruction *I = cast<Instruction>(U);
2165  if (!NullPointerIsDefined(I->getFunction(),
2166  V->getType()->getPointerAddressSpace()) &&
2167  DT->dominates(I, CtxI))
2168  return true;
2169  }
2170 
2171  // Consider only compare instructions uniquely controlling a branch
2172  Value *RHS;
2173  CmpInst::Predicate Pred;
2174  if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2175  continue;
2176 
2177  bool NonNullIfTrue;
2178  if (cmpExcludesZero(Pred, RHS))
2179  NonNullIfTrue = true;
2180  else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2181  NonNullIfTrue = false;
2182  else
2183  continue;
2184 
2185  SmallVector<const User *, 4> WorkList;
2186  SmallPtrSet<const User *, 4> Visited;
2187  for (auto *CmpU : U->users()) {
2188  assert(WorkList.empty() && "Should be!");
2189  if (Visited.insert(CmpU).second)
2190  WorkList.push_back(CmpU);
2191 
2192  while (!WorkList.empty()) {
2193  auto *Curr = WorkList.pop_back_val();
2194 
2195  // If a user is an AND, add all its users to the work list. We only
2196  // propagate "pred != null" condition through AND because it is only
2197  // correct to assume that all conditions of AND are met in true branch.
2198  // TODO: Support similar logic of OR and EQ predicate?
2199  if (NonNullIfTrue)
2200  if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2201  for (auto *CurrU : Curr->users())
2202  if (Visited.insert(CurrU).second)
2203  WorkList.push_back(CurrU);
2204  continue;
2205  }
2206 
2207  if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2208  assert(BI->isConditional() && "uses a comparison!");
2209 
2210  BasicBlock *NonNullSuccessor =
2211  BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2212  BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2213  if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2214  return true;
2215  } else if (NonNullIfTrue && isGuard(Curr) &&
2216  DT->dominates(cast<Instruction>(Curr), CtxI)) {
2217  return true;
2218  }
2219  }
2220  }
2221  }
2222 
2223  return false;
2224 }
2225 
2226 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2227  /// ensure that the value it's attached to can never be equal to Value?
2228  /// Value must have the same bit width as the values described by the ranges.
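     /// e.g. the range !{i32 1, i32 257} describes the half-open interval
     /// [1, 257), so it excludes the value 0.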
2229 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2230  const unsigned NumRanges = Ranges->getNumOperands() / 2;
2231  assert(NumRanges >= 1);
2232  for (unsigned i = 0; i < NumRanges; ++i) {
2233  ConstantInt *Lower =
2234  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2235  ConstantInt *Upper =
2236  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2237  ConstantRange Range(Lower->getValue(), Upper->getValue());
2238  if (Range.contains(Value))
2239  return false;
2240  }
2241  return true;
2242 }
2243 
2244 /// Try to detect a recurrence that monotonically increases/decreases from a
2245 /// non-zero starting value. These are common as induction variables.
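     /// e.g. the induction variable
     ///   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
     ///   %iv.next = add nuw i32 %iv, 1
     /// starts at 1 and can never wrap back around to 0.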
2246 static bool isNonZeroRecurrence(const PHINode *PN) {
2247  BinaryOperator *BO = nullptr;
2248  Value *Start = nullptr, *Step = nullptr;
2249  const APInt *StartC, *StepC;
2250  if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2251  !match(Start, m_APInt(StartC)) || StartC->isNullValue())
2252  return false;
2253 
2254  switch (BO->getOpcode()) {
2255  case Instruction::Add:
2256  // Starting from non-zero and stepping away from zero can never wrap back
2257  // to zero.
2258  return BO->hasNoUnsignedWrap() ||
2259  (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2260  StartC->isNegative() == StepC->isNegative());
2261  case Instruction::Mul:
2262  return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2263  match(Step, m_APInt(StepC)) && !StepC->isNullValue();
2264  case Instruction::Shl:
2265  return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2266  case Instruction::AShr:
2267  case Instruction::LShr:
2268  return BO->isExact();
2269  default:
2270  return false;
2271  }
2272 }
2273 
2274 /// Return true if the given value is known to be non-zero when defined. For
2275 /// vectors, return true if every demanded element is known to be non-zero when
2276 /// defined. For pointers, if the context instruction and dominator tree are
2277 /// specified, perform context-sensitive analysis and return true if the
2278 /// pointer couldn't possibly be null at the specified instruction.
2279 /// Supports values with integer or pointer type and vectors of integers.
2280 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2281  const Query &Q) {
2282  // FIXME: We currently have no way to represent the DemandedElts of a scalable
2283  // vector
2284  if (isa<ScalableVectorType>(V->getType()))
2285  return false;
2286 
2287  if (auto *C = dyn_cast<Constant>(V)) {
2288  if (C->isNullValue())
2289  return false;
2290  if (isa<ConstantInt>(C))
2291  // Must be non-zero due to null test above.
2292  return true;
2293 
2294  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2295  // See the comment for IntToPtr/PtrToInt instructions below.
2296  if (CE->getOpcode() == Instruction::IntToPtr ||
2297  CE->getOpcode() == Instruction::PtrToInt)
2298  if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2299  .getFixedSize() <=
2300  Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2301  return isKnownNonZero(CE->getOperand(0), Depth, Q);
2302  }
2303 
2304  // For constant vectors, check that all elements are undefined or known
2305  // non-zero to determine that the whole vector is known non-zero.
2306  if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2307  for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2308  if (!DemandedElts[i])
2309  continue;
2310  Constant *Elt = C->getAggregateElement(i);
2311  if (!Elt || Elt->isNullValue())
2312  return false;
2313  if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2314  return false;
2315  }
2316  return true;
2317  }
2318 
2319  // A global variable in address space 0 is non null unless extern weak
2320  // or an absolute symbol reference. Other address spaces may have null as a
2321  // valid address for a global, so we can't assume anything.
2322  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2323  if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2324  GV->getType()->getAddressSpace() == 0)
2325  return true;
2326  } else
2327  return false;
2328  }
2329 
2330  if (auto *I = dyn_cast<Instruction>(V)) {
2331  if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2332  // If the possible ranges don't contain zero, then the value is
2333  // definitely non-zero.
2334  if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2335  const APInt ZeroValue(Ty->getBitWidth(), 0);
2336  if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2337  return true;
2338  }
2339  }
2340  }
2341 
2342  if (isKnownNonZeroFromAssume(V, Q))
2343  return true;
2344 
2345  // Some of the tests below are recursive, so bail out if we hit the limit.
2346  if (Depth++ >= MaxAnalysisRecursionDepth)
2347  return false;
2348 
2349  // Check for pointer simplifications.
2350 
2351  if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2352  // Alloca never returns null, malloc might.
2353  if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2354  return true;
2355 
2356  // A byval or inalloca argument is non-null unless null is a valid
2357  // pointer in its address space. A nonnull argument is never null.
2358  if (const Argument *A = dyn_cast<Argument>(V)) {
2359  if (((A->hasPassPointeeByValueCopyAttr() &&
2360  !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2361  A->hasNonNullAttr()))
2362  return true;
2363  }
2364 
2365  // A Load tagged with nonnull metadata is never null.
2366  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2367  if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2368  return true;
2369 
2370  if (const auto *Call = dyn_cast<CallBase>(V)) {
2371  if (Call->isReturnNonNull())
2372  return true;
2373  if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2374  return isKnownNonZero(RP, Depth, Q);
2375  }
2376  }
2377 
2378  if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2379  return true;
2380 
2381  // Check for recursive pointer simplifications.
2382  if (V->getType()->isPointerTy()) {
2383  // Look through bitcast operations, GEPs, and int2ptr instructions as they
2384  // do not alter the value, or at least not the nullness property of the
2385  // value, e.g., int2ptr is allowed to zero/sign extend the value.
2386  //
2387  // Note that we have to take special care to avoid looking through
2388  // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2389  // as casts that can alter the value, e.g., AddrSpaceCasts.
2390  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2391  return isGEPKnownNonNull(GEP, Depth, Q);
2392 
2393  if (auto *BCO = dyn_cast<BitCastOperator>(V))
2394  return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2395 
2396  if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2397  if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2398  Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2399  return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2400  }
2401 
2402  // Similar to int2ptr above, we can look through ptr2int here if the cast
2403  // is a no-op or an extend and not a truncate.
2404  if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2405  if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2406  Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2407  return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2408 
2409  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2410 
2411  // X | Y != 0 if X != 0 or Y != 0.
2412  Value *X = nullptr, *Y = nullptr;
2413  if (match(V, m_Or(m_Value(X), m_Value(Y))))
2414  return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2415  isKnownNonZero(Y, DemandedElts, Depth, Q);
2416 
2417  // ext X != 0 if X != 0.
2418  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2419  return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2420 
2421  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2422  // if the lowest bit is shifted off the end.
2423  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2424  // shl nuw can't remove any non-zero bits.
2425  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2426  if (Q.IIQ.hasNoUnsignedWrap(BO))
2427  return isKnownNonZero(X, Depth, Q);
2428 
2429  KnownBits Known(BitWidth);
2430  computeKnownBits(X, DemandedElts, Known, Depth, Q);
2431  if (Known.One[0])
2432  return true;
2433  }
2434  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2435  // defined if the sign bit is shifted off the end.
2436  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2437  // shr exact can only shift out zero bits.
2438  const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2439  if (BO->isExact())
2440  return isKnownNonZero(X, Depth, Q);
2441 
2442  KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2443  if (Known.isNegative())
2444  return true;
2445 
2446  // If the shifter operand is a constant, and all of the bits shifted
2447  // out are known to be zero, and X is known non-zero then at least one
2448  // non-zero bit must remain.
2449  if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2450  auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2451  // Is there a known one in the portion not shifted out?
2452  if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2453  return true;
2454  // Are all the bits to be shifted out known zero?
2455  if (Known.countMinTrailingZeros() >= ShiftVal)
2456  return isKnownNonZero(X, DemandedElts, Depth, Q);
2457  }
2458  }
2459  // div exact can only produce a zero if the dividend is zero.
2460  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2461  return isKnownNonZero(X, DemandedElts, Depth, Q);
2462  }
2463  // X + Y.
2464  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2465  KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2466  KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2467 
2468  // If X and Y are both non-negative (as signed values) then their sum is not
2469  // zero unless both X and Y are zero.
2470  if (XKnown.isNonNegative() && YKnown.isNonNegative())
2471  if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2472  isKnownNonZero(Y, DemandedElts, Depth, Q))
2473  return true;
2474 
2475  // If X and Y are both negative (as signed values) then their sum is not
2476  // zero unless both X and Y equal INT_MIN.
2477  if (XKnown.isNegative() && YKnown.isNegative()) {
2478  APInt Mask = APInt::getSignedMaxValue(BitWidth);
2479  // The sign bit of X is set. If some other bit is set then X is not equal
2480  // to INT_MIN.
2481  if (XKnown.One.intersects(Mask))
2482  return true;
2483  // The sign bit of Y is set. If some other bit is set then Y is not equal
2484  // to INT_MIN.
2485  if (YKnown.One.intersects(Mask))
2486  return true;
2487  }
2488 
2489  // The sum of a non-negative number and a power of two is not zero.
2490  if (XKnown.isNonNegative() &&
2491  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2492  return true;
2493  if (YKnown.isNonNegative() &&
2494  isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2495  return true;
2496  }
2497  // X * Y.
2498  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2499  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2500  // If X and Y are non-zero then so is X * Y as long as the multiplication
2501  // does not overflow.
2502  if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2503  isKnownNonZero(X, DemandedElts, Depth, Q) &&
2504  isKnownNonZero(Y, DemandedElts, Depth, Q))
2505  return true;
2506  }
2507  // (C ? X : Y) != 0 if X != 0 and Y != 0.
2508  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2509  if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2510  isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2511  return true;
2512  }
2513  // PHI
2514  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2515  if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2516  return true;
2517 
2518  // Check if all incoming values are non-zero using recursion.
2519  Query RecQ = Q;
2520  unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2521  return llvm::all_of(PN->operands(), [&](const Use &U) {
2522  if (U.get() == PN)
2523  return true;
2524  RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2525  return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2526  });
2527  }
2528  // ExtractElement
2529  else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2530  const Value *Vec = EEI->getVectorOperand();
2531  const Value *Idx = EEI->getIndexOperand();
2532  auto *CIdx = dyn_cast<ConstantInt>(Idx);
2533  if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2534  unsigned NumElts = VecTy->getNumElements();
2535  APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2536  if (CIdx && CIdx->getValue().ult(NumElts))
2537  DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2538  return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2539  }
2540  }
2541  // Freeze
2542  else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2543  auto *Op = FI->getOperand(0);
2544  if (isKnownNonZero(Op, Depth, Q) &&
2545  isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2546  return true;
2547  }
2548 
2549  KnownBits Known(BitWidth);
2550  computeKnownBits(V, DemandedElts, Known, Depth, Q);
2551  return Known.One != 0;
2552 }
2553 
2554 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2555  // FIXME: We currently have no way to represent the DemandedElts of a scalable
2556  // vector
2557  if (isa<ScalableVectorType>(V->getType()))
2558  return false;
2559 
2560  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2561  APInt DemandedElts =
2562  FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2563  return isKnownNonZero(V, DemandedElts, Depth, Q);
2564 }
2565 
2566 /// If the pair of operators are the same invertible function, return the
2567 /// the operands of the function corresponding to each input. Otherwise,
2568 /// return None. An invertible function is one that is 1-to-1 and maps
2569 /// every input value to exactly one output value. This is equivalent to
2570 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2571 /// operands are equal, (except that Op1 and Op2 may be poison more often.)
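     /// e.g. "%x = add i32 %a, %c" and "%y = add i32 %b, %c" are equal
     /// exactly when %a and %b are, so this returns the pair (%a, %b).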
2572 static Optional<std::pair<Value*, Value*>>
2573 getInvertibleOperands(const Operator *Op1,
2574  const Operator *Op2) {
2575  if (Op1->getOpcode() != Op2->getOpcode())
2576  return None;
2577 
2578  auto getOperands = [&](unsigned OpNum) -> auto {
2579  return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2580  };
2581 
2582  switch (Op1->getOpcode()) {
2583  default:
2584  break;
2585  case Instruction::Add:
2586  case Instruction::Sub:
2587  if (Op1->getOperand(0) == Op2->getOperand(0))
2588  return getOperands(1);
2589  if (Op1->getOperand(1) == Op2->getOperand(1))
2590  return getOperands(0);
2591  break;
2592  case Instruction::Mul: {
2593  // invertible if A * B == (A * B) mod 2^N where A and B are integers
2594  // and N is the bitwidth. The nsw case is non-obvious, but proven by
2595  // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2596  auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2597  auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2598  if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2599  (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2600  break;
2601 
2602  // Assume operand order has been canonicalized
2603  if (Op1->getOperand(1) == Op2->getOperand(1) &&
2604  isa<ConstantInt>(Op1->getOperand(1)) &&
2605  !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2606  return getOperands(0);
2607  break;
2608  }
2609  case Instruction::Shl: {
2610  // Same as multiplies, with the difference that we don't need to check
2611  // for a non-zero multiply. Shifts always multiply by non-zero.
2612  auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2613  auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2614  if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2615  (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2616  break;
2617 
2618  if (Op1->getOperand(1) == Op2->getOperand(1))
2619  return getOperands(0);
2620  break;
2621  }
2622  case Instruction::AShr:
2623  case Instruction::LShr: {
2624  auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2625  auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2626  if (!PEO1->isExact() || !PEO2->isExact())
2627  break;
2628 
2629  if (Op1->getOperand(1) == Op2->getOperand(1))
2630  return getOperands(0);
2631  break;
2632  }
2633  case Instruction::SExt:
2634  case Instruction::ZExt:
2635  if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2636  return getOperands(0);
2637  break;
2638  case Instruction::PHI: {
2639  const PHINode *PN1 = cast<PHINode>(Op1);
2640  const PHINode *PN2 = cast<PHINode>(Op2);
2641 
2642  // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2643  // are a single invertible function of the start values? Note that repeated
2644  // application of an invertible function is also invertible.
2645  BinaryOperator *BO1 = nullptr;
2646  Value *Start1 = nullptr, *Step1 = nullptr;
2647  BinaryOperator *BO2 = nullptr;
2648  Value *Start2 = nullptr, *Step2 = nullptr;
2649  if (PN1->getParent() != PN2->getParent() ||
2650  !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2651  !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2652  break;
2653 
2654  auto Values = getInvertibleOperands(cast<Operator>(BO1),
2655  cast<Operator>(BO2));
2656  if (!Values)
2657  break;
2658 
2659  // We have to be careful of mutually defined recurrences here. Ex:
2660  // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2661  // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2662  // The invertibility of these is complicated, and not worth reasoning
2663  // about (yet?).
2664  if (Values->first != PN1 || Values->second != PN2)
2665  break;
2666 
2667  return std::make_pair(Start1, Start2);
2668  }
2669  }
2670  return None;
2671 }
2672 
2673 /// Return true if V2 == V1 + X, where X is known non-zero.
2674 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2675  const Query &Q) {
2676  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2677  if (!BO || BO->getOpcode() != Instruction::Add)
2678  return false;
2679  Value *Op = nullptr;
2680  if (V2 == BO->getOperand(0))
2681  Op = BO->getOperand(1);
2682  else if (V2 == BO->getOperand(1))
2683  Op = BO->getOperand(0);
2684  else
2685  return false;
2686  return isKnownNonZero(Op, Depth + 1, Q);
2687 }
2688 
2689 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2690 /// the multiplication is nuw or nsw.
2691 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2692  const Query &Q) {
2693  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2694  const APInt *C;
2695  return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2696  (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2697  !C->isNullValue() && !C->isOneValue() &&
2698  isKnownNonZero(V1, Depth + 1, Q);
2699  }
2700  return false;
2701 }
2702 
2703 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2704 /// the shift is nuw or nsw.
2705 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2706  const Query &Q) {
2707  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2708  const APInt *C;
2709  return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2710  (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2711  !C->isNullValue() && isKnownNonZero(V1, Depth + 1, Q);
2712  }
2713  return false;
2714 }
2715 
2716 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2717  unsigned Depth, const Query &Q) {
2718  // Check that the two PHIs are in the same block.
2719  if (PN1->getParent() != PN2->getParent())
2720  return false;
2721 
2722  SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2723  bool UsedFullRecursion = false;
2724  for (const BasicBlock *IncomBB : PN1->blocks()) {
2725  if (!VisitedBBs.insert(IncomBB).second)
2726  continue; // Don't reprocess blocks that we have dealt with already.
2727  const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2728  const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2729  const APInt *C1, *C2;
2730  if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2731  continue;
2732 
2733  // Only one pair of phi operands is allowed for full recursion.
2734  if (UsedFullRecursion)
2735  return false;
2736 
2737  Query RecQ = Q;
2738  RecQ.CxtI = IncomBB->getTerminator();
2739  if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2740  return false;
2741  UsedFullRecursion = true;
2742  }
2743  return true;
2744 }
2745 
2746 /// Return true if it is known that V1 != V2.
2747 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2748  const Query &Q) {
2749  if (V1 == V2)
2750  return false;
2751  if (V1->getType() != V2->getType())
2752  // We can't look through casts yet.
2753  return false;
2754 
2755  if (Depth >= MaxAnalysisRecursionDepth)
2756  return false;
2757 
2758  // See if we can recurse through (exactly one of) our operands. This
2759  // requires our operation be 1-to-1 and map every input value to exactly
2760  // one output value. Such an operation is invertible.
2761  auto *O1 = dyn_cast<Operator>(V1);
2762  auto *O2 = dyn_cast<Operator>(V2);
2763  if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2764  if (auto Values = getInvertibleOperands(O1, O2))
2765  return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2766 
2767  if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2768  const PHINode *PN2 = cast<PHINode>(V2);
2769  // FIXME: This is missing a generalization to handle the case where one is
2770  // a PHI and another one isn't.
2771  if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2772  return true;
2773  }
2774  }
2775 
2776  if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2777  return true;
2778 
2779  if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2780  return true;
2781 
2782  if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2783  return true;
2784 
2785  if (V1->getType()->isIntOrIntVectorTy()) {
2786  // Are any known bits in V1 contradictory to known bits in V2? If V1
2787  // has a known zero where V2 has a known one, they must not be equal.
2788  KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2789  KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2790 
2791  if (Known1.Zero.intersects(Known2.One) ||
2792  Known2.Zero.intersects(Known1.One))
2793  return true;
2794  }
2795  return false;
2796 }
2797 
2798 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2799 /// simplify operations downstream. Mask is known to be zero for bits that V
2800 /// cannot have.
2801 ///
2802 /// This function is defined on values with integer type, values with pointer
2803 /// type, and vectors of integers. In the case
2804 /// where V is a vector, the mask, known zero, and known one values are the
2805 /// same width as the vector element, and the bit is set only if it is true
2806 /// for all of the elements in the vector.
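     /// e.g. with Mask == 0x3, this returns true exactly when the two low
     /// bits of V are known to be zero.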
2807 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2808  const Query &Q) {
2809  KnownBits Known(Mask.getBitWidth());
2810  computeKnownBits(V, Known, Depth, Q);
2811  return Mask.isSubsetOf(Known.Zero);
2812 }
2813 
2814 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2815 // Returns the input and lower/upper bounds.
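     // e.g. smax(smin(In, 10), -5) clamps In to [-5, 10]: the match returns
     // true with CLow = -5 and CHigh = 10.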
2816 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2817  const APInt *&CLow, const APInt *&CHigh) {
2818  assert(isa<Operator>(Select) &&
2819  cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2820  "Input should be a Select!");
2821 
2822  const Value *LHS = nullptr, *RHS = nullptr;
2823  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2824  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2825  return false;
2826 
2827  if (!match(RHS, m_APInt(CLow)))
2828  return false;
2829 
2830  const Value *LHS2 = nullptr, *RHS2 = nullptr;
2831  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2832  if (getInverseMinMaxFlavor(SPF) != SPF2)
2833  return false;
2834 
2835  if (!match(RHS2, m_APInt(CHigh)))
2836  return false;
2837 
2838  if (SPF == SPF_SMIN)
2839  std::swap(CLow, CHigh);
2840 
2841  In = LHS2;
2842  return CLow->sle(*CHigh);
2843 }
2844 
2845 /// For vector constants, loop over the elements and find the constant with the
2846 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2847 /// or if any element was not analyzed; otherwise, return the count for the
2848 /// element with the minimum number of sign bits.
2849 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2850  const APInt &DemandedElts,
2851  unsigned TyBits) {
2852  const auto *CV = dyn_cast<Constant>(V);
2853  if (!CV || !isa<FixedVectorType>(CV->getType()))
2854  return 0;
2855 
2856  unsigned MinSignBits = TyBits;
2857  unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2858  for (unsigned i = 0; i != NumElts; ++i) {
2859  if (!DemandedElts[i])
2860  continue;
2861  // If we find a non-ConstantInt, bail out.
2862  auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2863  if (!Elt)
2864  return 0;
2865 
2866  MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2867  }
2868 
2869  return MinSignBits;
2870 }
2871 
2872 static unsigned ComputeNumSignBitsImpl(const Value *V,
2873  const APInt &DemandedElts,
2874  unsigned Depth, const Query &Q);
2875 
2876 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2877  unsigned Depth, const Query &Q) {
2878  unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2879  assert(Result > 0 && "At least one sign bit needs to be present!");
2880  return Result;
2881 }
2882 
2883 /// Return the number of times the sign bit of the register is replicated into
2884 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2885 /// (itself), but other cases can give us information. For example, immediately
2886 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2887 /// other, so we return 3. For vectors, return the number of sign bits for the
2888 /// vector element with the minimum number of known sign bits of the demanded
2889 /// elements in the vector specified by DemandedElts.
2890 static unsigned ComputeNumSignBitsImpl(const Value *V,
2891  const APInt &DemandedElts,
2892  unsigned Depth, const Query &Q) {
2893  Type *Ty = V->getType();
2894 
2895  // FIXME: We currently have no way to represent the DemandedElts of a scalable
2896  // vector
2897  if (isa<ScalableVectorType>(Ty))
2898  return 1;
2899 
2900 #ifndef NDEBUG
2901  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2902 
2903  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2904  assert(
2905  FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2906  "DemandedElt width should equal the fixed vector number of elements");
2907  } else {
2908  assert(DemandedElts == APInt(1, 1) &&
2909  "DemandedElt width should be 1 for scalars");
2910  }
2911 #endif
2912 
2913  // We return the minimum number of sign bits that are guaranteed to be present
2914  // in V, so for undef we have to conservatively return 1. We don't have the
2915  // same behavior for poison though -- that's a FIXME today.
2916 
2917  Type *ScalarTy = Ty->getScalarType();
2918  unsigned TyBits = ScalarTy->isPointerTy() ?
2919  Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2920  Q.DL.getTypeSizeInBits(ScalarTy);
2921 
2922  unsigned Tmp, Tmp2;
2923  unsigned FirstAnswer = 1;
2924 
2925  // Note that ConstantInt is handled by the general computeKnownBits case
2926  // below.
2927 
2928  if (Depth == MaxAnalysisRecursionDepth)
2929  return 1;
2930 
2931  if (auto *U = dyn_cast<Operator>(V)) {
2932  switch (Operator::getOpcode(V)) {
2933  default: break;
2934  case Instruction::SExt:
2935  Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2936  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2937 
2938  case Instruction::SDiv: {
2939  const APInt *Denominator;
2940  // sdiv X, C -> adds log(C) sign bits.
2941  if (match(U->getOperand(1), m_APInt(Denominator))) {
2942 
2943  // Ignore non-positive denominator.
2944  if (!Denominator->isStrictlyPositive())
2945  break;
2946 
2947  // Calculate the incoming numerator bits.
2948  unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2949 
2950  // Add floor(log(C)) bits to the numerator bits.
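      // e.g. "sdiv X, 4" divides by 2^2, so the result has at least two more
      // sign bits than X (logBase2(4) == 2).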
2951  return std::min(TyBits, NumBits + Denominator->logBase2());
2952  }
2953  break;
2954  }
2955 
2956  case Instruction::SRem: {
2957  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2958 
2959  const APInt *Denominator;
2960  // srem X, C -> we know that the result is within [-C+1,C) when C is a
2961  // positive constant. This lets us put a lower bound on the number of sign
2962  // bits.
2963  if (match(U->getOperand(1), m_APInt(Denominator))) {
2964 
2965  // Ignore non-positive denominator.
2966  if (Denominator->isStrictlyPositive()) {
2967  // Calculate the leading sign bit constraints by examining the
2968  // denominator. Given that the denominator is positive, there are two
2969  // cases:
2970  //
2971  // 1. The numerator is positive. The result range is [0,C) and
2972  // [0,C) u< (1 << ceilLogBase2(C)).
2973  //
2974  // 2. The numerator is negative. Then the result range is (-C,0] and
2975  // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2976  //
2977  // Thus a lower bound on the number of sign bits is `TyBits -
2978  // ceilLogBase2(C)`.
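 // Illustrative instance (editorial): for "srem i32 %x, 20",
 // ceilLogBase2(20) == 5, so the result is known to have at least
 // 32 - 5 = 27 sign bits.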
2979 
2980  unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2981  Tmp = std::max(Tmp, ResBits);
2982  }
2983  }
2984  return Tmp;
2985  }
2986 
2987  case Instruction::AShr: {
2988  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2989  // ashr X, C -> adds C sign bits. Vectors too.
2990  const APInt *ShAmt;
2991  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2992  if (ShAmt->uge(TyBits))
2993  break; // Bad shift.
2994  unsigned ShAmtLimited = ShAmt->getZExtValue();
2995  Tmp += ShAmtLimited;
2996  if (Tmp > TyBits) Tmp = TyBits;
2997  }
2998  return Tmp;
2999  }
3000  case Instruction::Shl: {
3001  const APInt *ShAmt;
3002  if (match(U->getOperand(1), m_APInt(ShAmt))) {
3003  // shl destroys sign bits.
3004  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3005  if (ShAmt->uge(TyBits) || // Bad shift.
3006  ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3007  Tmp2 = ShAmt->getZExtValue();
3008  return Tmp - Tmp2;
3009  }
3010  break;
3011  }
3012  case Instruction::And:
3013  case Instruction::Or:
3014  case Instruction::Xor: // NOT is handled here.
3015  // Logical binary ops preserve the number of sign bits at the worst.
3016  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3017  if (Tmp != 1) {
3018  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3019  FirstAnswer = std::min(Tmp, Tmp2);
3020  // We computed what we know about the sign bits as our first
3021  // answer. Now proceed to the generic code that uses
3022  // computeKnownBits, and pick whichever answer is better.
3023  }
3024  break;
3025 
3026  case Instruction::Select: {
3027  // If we have a clamp pattern, we know that the number of sign bits will
3028  // be the minimum of the clamp min/max range.
3029  const Value *X;
3030  const APInt *CLow, *CHigh;
3031  if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3032  return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3033 
3034  Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3035  if (Tmp == 1) break;
3036  Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3037  return std::min(Tmp, Tmp2);
3038  }
3039 
3040  case Instruction::Add:
3041  // Add can have at most one carry bit. Thus we know that the output
3042  // is, at worst, one more bit than the inputs.
3043  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3044  if (Tmp == 1) break;
3045 
3046  // Special case decrementing a value (ADD X, -1):
3047  if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3048  if (CRHS->isAllOnesValue()) {
3049  KnownBits Known(TyBits);
3050  computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3051 
3052  // If the input is known to be 0 or 1, the output is 0/-1, which is
3053  // all sign bits set.
3054  if ((Known.Zero | 1).isAllOnesValue())
3055  return TyBits;
3056 
3057  // If we are subtracting one from a positive number, there is no carry
3058  // out of the result.
3059  if (Known.isNonNegative())
3060  return Tmp;
3061  }
3062 
3063  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3064  if (Tmp2 == 1) break;
3065  return std::min(Tmp, Tmp2) - 1;
3066 
3067  case Instruction::Sub:
3068  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3069  if (Tmp2 == 1) break;
3070 
3071  // Handle NEG.
3072  if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3073  if (CLHS->isNullValue()) {
3074  KnownBits Known(TyBits);
3075  computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3076  // If the input is known to be 0 or 1, the output is 0/-1, which is
3077  // all sign bits set.
3078  if ((Known.Zero | 1).isAllOnesValue())
3079  return TyBits;
3080 
3081  // If the input is known to be positive (the sign bit is known clear),
3082  // the output of the NEG has the same number of sign bits as the
3083  // input.
3084  if (Known.isNonNegative())
3085  return Tmp2;
3086 
3087  // Otherwise, we treat this like a SUB.
3088  }
3089 
3090  // Sub can have at most one carry bit. Thus we know that the output
3091  // is, at worst, one more bit than the inputs.
3092  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3093  if (Tmp == 1) break;
3094  return std::min(Tmp, Tmp2) - 1;
3095 
3096  case Instruction::Mul: {
3097  // The output of the Mul can be at most twice the valid bits in the
3098  // inputs.
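 // Illustrative instance (editorial): with TyBits == 32 and 20 sign bits
 // known on each operand, each input carries 13 valid bits, the product
 // needs at most 26, and the result keeps 32 - 26 + 1 = 7 sign bits.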
3099  unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3100  if (SignBitsOp0 == 1) break;
3101  unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3102  if (SignBitsOp1 == 1) break;
3103  unsigned OutValidBits =
3104  (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3105  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3106  }
3107 
3108  case Instruction::PHI: {
3109  const PHINode *PN = cast<PHINode>(U);
3110  unsigned NumIncomingValues = PN->getNumIncomingValues();
3111  // Don't analyze large in-degree PHIs.
3112  if (NumIncomingValues > 4) break;
3113  // Unreachable blocks may have zero-operand PHI nodes.
3114  if (NumIncomingValues == 0) break;
3115 
3116  // Take the minimum of all incoming values. This can't infinitely loop
3117  // because of our depth threshold.
3118  Query RecQ = Q;
3119  Tmp = TyBits;
3120  for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3121  if (Tmp == 1) return Tmp;
3122  RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3123  Tmp = std::min(
3124  Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3125  }
3126  return Tmp;
3127  }
3128 
3129  case Instruction::Trunc:
3130  // FIXME: it's tricky to do anything useful for this, but it is an
3131  // important case for targets like X86.
3132  break;
3133 
3134  case Instruction::ExtractElement:
3135  // Look through extract element. At the moment we keep this simple and
3136  // skip tracking the specific element. But at least we might find
3137  // information valid for all elements of the vector (for example if vector
3138  // is sign extended, shifted, etc).
3139  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3140 
3141  case Instruction::ShuffleVector: {
3142  // Collect the minimum number of sign bits that are shared by every vector
3143  // element referenced by the shuffle.
3144  auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3145  if (!Shuf) {
3146  // FIXME: Add support for shufflevector constant expressions.
3147  return 1;
3148  }
3149  APInt DemandedLHS, DemandedRHS;
3150  // For undef elements, we don't know anything about the common state of
3151  // the shuffle result.
3152  if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3153  return 1;
3154  Tmp = std::numeric_limits<unsigned>::max();
3155  if (!!DemandedLHS) {
3156  const Value *LHS = Shuf->getOperand(0);
3157  Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3158  }
3159  // If we don't know anything, early out and try computeKnownBits
3160  // fall-back.
3161  if (Tmp == 1)
3162  break;
3163  if (!!DemandedRHS) {
3164  const Value *RHS = Shuf->getOperand(1);
3165  Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3166  Tmp = std::min(Tmp, Tmp2);
3167  }
3168  // If we don't know anything, early out and try computeKnownBits
3169  // fall-back.
3170  if (Tmp == 1)
3171  break;
3172  assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3173  return Tmp;
3174  }
3175  case Instruction::Call: {
3176  if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3177  switch (II->getIntrinsicID()) {
3178  default: break;
3179  case Intrinsic::abs:
3180  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3181  if (Tmp == 1) break;
3182 
3183  // Absolute value reduces number of sign bits by at most 1.
3184  return Tmp - 1;
3185  }
3186  }
3187  }
3188  }
3189  }
3190 
3191  // Finally, if we can prove that the top bits of the result are 0's or 1's,
3192  // use this information.
3193 
3194  // If we can examine all elements of a vector constant successfully, we're
3195  // done (we can't do any better than that). If not, keep trying.
3196  if (unsigned VecSignBits =
3197  computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3198  return VecSignBits;
3199 
3200  KnownBits Known(TyBits);
3201  computeKnownBits(V, DemandedElts, Known, Depth, Q);
3202 
3203  // If we know that the sign bit is either zero or one, determine the number of
3204  // identical bits in the top of the input value.
3205  return std::max(FirstAnswer, Known.countMinSignBits());
3206 }
3207 
3208 /// This function computes the integer multiple of Base that equals V.
3209 /// If successful, it returns true and returns the multiple in
3210 /// Multiple. If unsuccessful, it returns false. It looks
3211 /// through SExt instructions only if LookThroughSExt is true.
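// Illustrative examples (editorial sketch): for V = i32 24 and Base = 8,
// this returns true with Multiple = i32 3. For V = "shl i32 %x, 3" and
// Base = 8, the shift is rewritten below as a multiply by 8, so the call
// returns true with Multiple = %x.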
3212 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3213  bool LookThroughSExt, unsigned Depth) {
3214  assert(V && "No Value?");
3215  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3216  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3217 
3218  Type *T = V->getType();
3219 
3220  ConstantInt *CI = dyn_cast<ConstantInt>(V);
3221 
3222  if (Base == 0)
3223  return false;
3224 
3225  if (Base == 1) {
3226  Multiple = V;
3227  return true;
3228  }
3229 
3230  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3231  Constant *BaseVal = ConstantInt::get(T, Base);
3232  if (CO && CO == BaseVal) {
3233  // Multiple is 1.
3234  Multiple = ConstantInt::get(T, 1);
3235  return true;
3236  }
3237 
3238  if (CI && CI->getZExtValue() % Base == 0) {
3239  Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3240  return true;
3241  }
3242 
3243  if (Depth == MaxAnalysisRecursionDepth) return false;
3244 
3245  Operator *I = dyn_cast<Operator>(V);
3246  if (!I) return false;
3247 
3248  switch (I->getOpcode()) {
3249  default: break;
3250  case Instruction::SExt:
3251  if (!LookThroughSExt) return false;
3252  // otherwise fall through to ZExt
3253  LLVM_FALLTHROUGH;
3254  case Instruction::ZExt:
3255  return ComputeMultiple(I->getOperand(0), Base, Multiple,
3256  LookThroughSExt, Depth+1);
3257  case Instruction::Shl:
3258  case Instruction::Mul: {
3259  Value *Op0 = I->getOperand(0);
3260  Value *Op1 = I->getOperand(1);
3261 
3262  if (I->getOpcode() == Instruction::Shl) {
3263  ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3264  if (!Op1CI) return false;
3265  // Turn Op0 << Op1 into Op0 * 2^Op1
3266  APInt Op1Int = Op1CI->getValue();
3267  uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3268  APInt API(Op1Int.getBitWidth(), 0);
3269  API.setBit(BitToSet);
3270  Op1 = ConstantInt::get(V->getContext(), API);
3271  }
3272 
3273  Value *Mul0 = nullptr;
3274  if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3275  if (Constant *Op1C = dyn_cast<Constant>(Op1))
3276  if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3277  if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3278  MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3279  Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3280  if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3281  MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3282  MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3283 
3284  // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3285  Multiple = ConstantExpr::getMul(MulC, Op1C);
3286  return true;
3287  }
3288 
3289  if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3290  if (Mul0CI->getValue() == 1) {
3291  // V == Base * Op1, so return Op1
3292  Multiple = Op1;
3293  return true;
3294  }
3295  }
3296 
3297  Value *Mul1 = nullptr;
3298  if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3299  if (Constant *Op0C = dyn_cast<Constant>(Op0))
3300  if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3301  if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3302  MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3303  Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3304  if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3305  MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3306  MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3307 
3308  // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3309  Multiple = ConstantExpr::getMul(MulC, Op0C);
3310  return true;
3311  }
3312 
3313  if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3314  if (Mul1CI->getValue() == 1) {
3315  // V == Base * Op0, so return Op0
3316  Multiple = Op0;
3317  return true;
3318  }
3319  }
3320  }
3321  }
3322 
3323  // We could not determine if V is a multiple of Base.
3324  return false;
3325 }
3326 
3327 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3328  const TargetLibraryInfo *TLI) {
3329  const Function *F = CB.getCalledFunction();
3330  if (!F)
3331  return Intrinsic::not_intrinsic;
3332 
3333  if (F->isIntrinsic())
3334  return F->getIntrinsicID();
3335 
3336  // We are going to infer semantics of a library function based on mapping it
3337  // to an LLVM intrinsic. Check that the library function is available from
3338  // this callbase and in this environment.
3339  LibFunc Func;
3340  if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3341  !CB.onlyReadsMemory())
3342  return Intrinsic::not_intrinsic;
3343 
3344  switch (Func) {
3345  default:
3346  break;
3347  case LibFunc_sin:
3348  case LibFunc_sinf:
3349  case LibFunc_sinl:
3350  return Intrinsic::sin;
3351  case LibFunc_cos:
3352  case LibFunc_cosf:
3353  case LibFunc_cosl:
3354  return Intrinsic::cos;
3355  case LibFunc_exp:
3356  case LibFunc_expf:
3357  case LibFunc_expl:
3358  return Intrinsic::exp;
3359  case LibFunc_exp2:
3360  case LibFunc_exp2f:
3361  case LibFunc_exp2l:
3362  return Intrinsic::exp2;
3363  case LibFunc_log:
3364  case LibFunc_logf:
3365  case LibFunc_logl:
3366  return Intrinsic::log;
3367  case LibFunc_log10:
3368  case LibFunc_log10f:
3369  case LibFunc_log10l:
3370  return Intrinsic::log10;
3371  case LibFunc_log2:
3372  case LibFunc_log2f:
3373  case LibFunc_log2l:
3374  return Intrinsic::log2;
3375  case LibFunc_fabs:
3376  case LibFunc_fabsf:
3377  case LibFunc_fabsl:
3378  return Intrinsic::fabs;
3379  case LibFunc_fmin:
3380  case LibFunc_fminf:
3381  case LibFunc_fminl:
3382  return Intrinsic::minnum;
3383  case LibFunc_fmax:
3384  case LibFunc_fmaxf:
3385  case LibFunc_fmaxl:
3386  return Intrinsic::maxnum;
3387  case LibFunc_copysign:
3388  case LibFunc_copysignf:
3389  case LibFunc_copysignl:
3390  return Intrinsic::copysign;
3391  case LibFunc_floor:
3392  case LibFunc_floorf:
3393  case LibFunc_floorl:
3394  return Intrinsic::floor;
3395  case LibFunc_ceil:
3396  case LibFunc_ceilf:
3397  case LibFunc_ceill:
3398  return Intrinsic::ceil;
3399  case LibFunc_trunc:
3400  case LibFunc_truncf:
3401  case LibFunc_truncl:
3402  return Intrinsic::trunc;
3403  case LibFunc_rint:
3404  case LibFunc_rintf:
3405  case LibFunc_rintl:
3406  return Intrinsic::rint;
3407  case LibFunc_nearbyint:
3408  case LibFunc_nearbyintf:
3409  case LibFunc_nearbyintl:
3410  return Intrinsic::nearbyint;
3411  case LibFunc_round:
3412  case LibFunc_roundf:
3413  case LibFunc_roundl:
3414  return Intrinsic::round;
3415  case LibFunc_roundeven:
3416  case LibFunc_roundevenf:
3417  case LibFunc_roundevenl:
3418  return Intrinsic::roundeven;
3419  case LibFunc_pow:
3420  case LibFunc_powf:
3421  case LibFunc_powl:
3422  return Intrinsic::pow;
3423  case LibFunc_sqrt:
3424  case LibFunc_sqrtf:
3425  case LibFunc_sqrtl:
3426  return Intrinsic::sqrt;
3427  }
3428 
3429  return Intrinsic::not_intrinsic;
3430 }
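// Usage sketch (editorial; assumes a CallBase CB calling the C library
// function "sqrtf" and a valid TargetLibraryInfo): the readonly libcall is
// mapped onto the equivalent intrinsic so callers can reason about it:
//
//   if (getIntrinsicForCallSite(CB, TLI) == Intrinsic::sqrt)
//     ; // treat the call like llvm.sqrt.f32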
3431 
3432 /// Return true if we can prove that the specified FP value is never equal to
3433 /// -0.0.
3434 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3435 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3436 /// the same as +0.0 in floating-point ops.
3437 ///
3438 /// NOTE: this function will need to be revisited when we support non-default
3439 /// rounding modes!
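// Illustrative cases (editorial): "fadd %x, +0.0" can never yield -0.0
// under default rounding (even for %x == -0.0 the sum is +0.0), while a
// bare "fmul %x, %y" can (e.g. %x == -0.0, %y == 1.0), so it is not
// matched below.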
3440 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3441  unsigned Depth) {
3442  if (auto *CFP = dyn_cast<ConstantFP>(V))
3443  return !CFP->getValueAPF().isNegZero();
3444 
3445  if (Depth == MaxAnalysisRecursionDepth)
3446  return false;
3447 
3448  auto *Op = dyn_cast<Operator>(V);
3449  if (!Op)
3450  return false;
3451 
3452  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3453  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3454  return true;
3455 
3456  // sitofp and uitofp turn into +0.0 for zero.
3457  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3458  return true;
3459 
3460  if (auto *Call = dyn_cast<CallInst>(Op)) {
3461  Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3462  switch (IID) {
3463  default:
3464  break;
3465  // sqrt(-0.0) = -0.0, no other negative results are possible.
3466  case Intrinsic::sqrt:
3467  case Intrinsic::canonicalize:
3468  return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3469  // fabs(x) != -0.0
3470  case Intrinsic::fabs:
3471  return true;
3472  }
3473  }
3474 
3475  return false;
3476 }
3477 
3478 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3479 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3480 /// bit despite comparing equal.
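// Illustrative case (editorial): for the constant -0.0, the ordered query
// (SignBitOnly == false) succeeds because -0.0 compares equal to 0.0, but
// the sign-bit query (SignBitOnly == true) fails because the sign bit of
// -0.0 is set.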
3481 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3482  const TargetLibraryInfo *TLI,
3483  bool SignBitOnly,
3484  unsigned Depth) {
3485  // TODO: This function does not do the right thing when SignBitOnly is true
3486  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3487  // which flips the sign bits of NaNs. See
3488  // https://llvm.org/bugs/show_bug.cgi?id=31702.
3489 
3490  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3491  return !CFP->getValueAPF().isNegative() ||
3492  (!SignBitOnly && CFP->getValueAPF().isZero());
3493  }
3494 
3495  // Handle vector of constants.
3496  if (auto *CV = dyn_cast<Constant>(V)) {
3497  if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3498  unsigned NumElts = CVFVTy->getNumElements();
3499  for (unsigned i = 0; i != NumElts; ++i) {
3500  auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3501  if (!CFP)
3502  return false;
3503  if (CFP->getValueAPF().isNegative() &&
3504  (SignBitOnly || !CFP->getValueAPF().isZero()))
3505  return false;
3506  }
3507 
3508  // All non-negative ConstantFPs.
3509  return true;
3510  }
3511  }
3512 
3513  if (Depth == MaxAnalysisRecursionDepth)
3514  return false;
3515 
3516  const Operator *I = dyn_cast<Operator>(V);
3517  if (!I)
3518  return false;
3519 
3520  switch (I->getOpcode()) {
3521  default:
3522  break;
3523  // Unsigned integers are always nonnegative.
3524  case Instruction::UIToFP:
3525  return true;
3526  case Instruction::FMul:
3527  case Instruction::FDiv:
3528  // X * X is always non-negative or a NaN.
3529  // X / X is always exactly 1.0 or a NaN.
3530  if (I->getOperand(0) == I->getOperand(1) &&
3531  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3532  return true;
3533 
3534  LLVM_FALLTHROUGH;
3535  case Instruction::FAdd:
3536  case Instruction::FRem:
3537  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3538  Depth + 1) &&
3539  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3540  Depth + 1);
3541  case Instruction::Select:
3542  return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3543  Depth + 1) &&
3544  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3545  Depth + 1);
3546  case Instruction::FPExt:
3547  case Instruction::FPTrunc:
3548  // Widening/narrowing never change sign.
3549  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3550  Depth + 1);
3551  case Instruction::ExtractElement:
3552  // Look through extract element. At the moment we keep this simple and skip
3553  // tracking the specific element. But at least we might find information
3554  // valid for all elements of the vector.
3555  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3556  Depth + 1);
3557  case Instruction::Call:
3558  const auto *CI = cast<CallInst>(I);
3559  Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3560  switch (IID) {
3561  default:
3562  break;
3563  case Intrinsic::maxnum: {
3564  Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3565  auto isPositiveNum = [&](Value *V) {
3566  if (SignBitOnly) {
3567  // With SignBitOnly, this is tricky because the result of
3568  // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3569  // a constant strictly greater than 0.0.
3570  const APFloat *C;
3571  return match(V, m_APFloat(C)) &&
3572  *C > APFloat::getZero(C->getSemantics());
3573  }
3574 
3575  // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3576  // maxnum can't be ordered-less-than-zero.
3577  return isKnownNeverNaN(V, TLI) &&
3578  cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3579  };
3580 
3581  // TODO: This could be improved. We could also check that neither operand
3582  // has its sign bit set (and at least 1 is not-NAN?).
3583  return isPositiveNum(V0) || isPositiveNum(V1);
3584  }
3585 
3586  case Intrinsic::maximum:
3587  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3588  Depth + 1) ||
3589  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3590  Depth + 1);
3591  case Intrinsic::minnum:
3592  case Intrinsic::minimum:
3593  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3594  Depth + 1) &&
3595  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3596  Depth + 1);
3597  case Intrinsic::exp:
3598  case Intrinsic::exp2:
3599  case Intrinsic::fabs:
3600  return true;
3601 
3602  case Intrinsic::sqrt:
3603  // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3604  if (!SignBitOnly)
3605  return true;
3606  return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3607  CannotBeNegativeZero(CI->getOperand(0), TLI));
3608 
3609  case Intrinsic::powi:
3610  if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3611  // powi(x,n) is non-negative if n is even.
3612  if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3613  return true;
3614  }
3615  // TODO: This is not correct. Given that exp is an integer, here are the
3616  // ways that pow can return a negative value:
3617  //
3618  // pow(x, exp) --> negative if exp is odd and x is negative.
3619  // pow(-0, exp) --> -inf if exp is negative odd.
3620  // pow(-0, exp) --> -0 if exp is positive odd.
3621  // pow(-inf, exp) --> -0 if exp is negative odd.
3622  // pow(-inf, exp) --> -inf if exp is positive odd.
3623  //
3624  // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3625  // but we must return false if x == -0. Unfortunately we do not currently
3626  // have a way of expressing this constraint. See details in
3627  // https://llvm.org/bugs/show_bug.cgi?id=31702.
3628  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3629  Depth + 1);
3630 
3631  case Intrinsic::fma:
3632  case Intrinsic::fmuladd:
3633  // x*x+y is non-negative if y is non-negative.
3634  return I->getOperand(0) == I->getOperand(1) &&
3635  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3636  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3637  Depth + 1);
3638  }
3639  break;
3640  }
3641  return false;
3642 }
3643 
3644 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3645  const TargetLibraryInfo *TLI) {
3646  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3647 }
3648 
3649 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3650  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3651 }
3652 
3653 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3654  unsigned Depth) {
3655  assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3656 
3657  // If we're told that infinities won't happen, assume they won't.
3658  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3659  if (FPMathOp->hasNoInfs())
3660  return true;
3661 
3662  // Handle scalar constants.
3663  if (auto *CFP = dyn_cast<ConstantFP>(V))
3664  return !CFP->isInfinity();
3665 
3666  if (Depth == MaxAnalysisRecursionDepth)
3667  return false;
3668 
3669  if (auto *Inst = dyn_cast<Instruction>(V)) {
3670  switch (Inst->getOpcode()) {
3671  case Instruction::Select: {
3672  return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3673  isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3674  }
3675  case Instruction::SIToFP:
3676  case Instruction::UIToFP: {
3677  // Get width of largest magnitude integer (remove a bit if signed).
3678  // This still works for a signed minimum value because the largest FP
3679  // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3680  int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3681  if (Inst->getOpcode() == Instruction::SIToFP)
3682  --IntSize;
3683 
3684  // If the exponent of the largest finite FP value can hold the largest
3685  // integer, the result of the cast must be finite.
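 // Illustrative instance (editorial): for "uitofp i32 %x to float",
 // IntSize == 32 and ilogb(FLT_MAX) == 127 >= 32, so the result is always
 // finite; a hypothetical cast from a 1024-bit integer could round to
 // +infinity and would not be proven safe here.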
3686  Type *FPTy = Inst->getType()->getScalarType();
3687  return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3688  }
3689  default:
3690  break;
3691  }
3692  }
3693 
3694  // Try to handle fixed width vector constants.
3695  auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3696  if (VFVTy && isa<Constant>(V)) {
3697  // For vectors, verify that each element is not infinity.
3698  unsigned NumElts = VFVTy->getNumElements();
3699  for (unsigned i = 0; i != NumElts; ++i) {
3700  Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3701  if (!Elt)
3702  return false;
3703  if (isa<UndefValue>(Elt))
3704  continue;
3705  auto *CElt = dyn_cast<ConstantFP>(Elt);
3706  if (!CElt || CElt->isInfinity())
3707  return false;
3708  }
3709  // All elements were confirmed non-infinity or undefined.
3710  return true;
3711  }
3712 
3713  // Was not able to prove that V never contains infinity.
3714  return false;
3715 }
3716 
3717 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3718  unsigned Depth) {
3719  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3720 
3721  // If we're told that NaNs won't happen, assume they won't.
3722  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3723  if (FPMathOp->hasNoNaNs())
3724  return true;
3725 
3726  // Handle scalar constants.
3727  if (auto *CFP = dyn_cast<ConstantFP>(V))
3728  return !CFP->isNaN();
3729 
3730  if (Depth == MaxAnalysisRecursionDepth)
3731  return false;
3732 
3733  if (auto *Inst = dyn_cast<Instruction>(V)) {
3734  switch (Inst->getOpcode()) {
3735  case Instruction::FAdd:
3736  case Instruction::FSub:
3737  // Adding positive and negative infinity produces NaN.
3738  return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3739  isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3740  (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3741  isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3742 
3743  case Instruction::FMul:
3744  // Zero multiplied with infinity produces NaN.
3745  // FIXME: If neither side can be zero fmul never produces NaN.
3746  return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3747  isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3748  isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3749  isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3750 
3751  case Instruction::FDiv:
3752  case Instruction::FRem:
3753  // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3754  return false;
3755 
3756  case Instruction::Select: {
3757  return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3758  isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3759  }
3760  case Instruction::SIToFP:
3761  case Instruction::UIToFP:
3762  return true;
3763  case Instruction::FPTrunc:
3764  case Instruction::FPExt:
3765  return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3766  default:
3767  break;
3768  }
3769  }
3770 
3771  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3772  switch (II->getIntrinsicID()) {
3773  case Intrinsic::canonicalize:
3774  case Intrinsic::fabs:
3775  case Intrinsic::copysign:
3776  case Intrinsic::exp:
3777  case Intrinsic::exp2:
3778  case Intrinsic::floor:
3779  case Intrinsic::ceil:
3780  case Intrinsic::trunc:
3781  case Intrinsic::rint:
3782  case Intrinsic::nearbyint:
3783  case Intrinsic::round:
3784  case Intrinsic::roundeven:
3785  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3786  case Intrinsic::sqrt:
3787  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3788  CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3789  case Intrinsic::minnum:
3790  case Intrinsic::maxnum:
3791  // If either operand is not NaN, the result is not NaN.
3792  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3793  isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3794  default:
3795  return false;
3796  }
3797  }
3798 
3799  // Try to handle fixed width vector constants
3800  auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3801  if (VFVTy && isa<Constant>(V)) {
3802  // For vectors, verify that each element is not NaN.
3803  unsigned NumElts = VFVTy->getNumElements();
3804  for (unsigned i = 0; i != NumElts; ++i) {
3805  Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3806  if (!Elt)
3807  return false;
3808  if (isa<UndefValue>(Elt))
3809  continue;
3810  auto *CElt = dyn_cast<ConstantFP>(Elt);
3811  if (!CElt || CElt->isNaN())
3812  return false;
3813  }
3814  // All elements were confirmed not-NaN or undefined.
3815  return true;
3816  }
3817 
3818  // Was not able to prove that V never contains NaN
3819  return false;
3820 }
3821 
3822 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3823 
3824  // All byte-wide stores are splatable, even of arbitrary variables.
3825  if (V->getType()->isIntegerTy(8))
3826  return V;
3827 
3828  LLVMContext &Ctx = V->getContext();
3829 
3830  // Undef: don't care.
3831  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3832  if (isa<UndefValue>(V))
3833  return UndefInt8;
3834 
3835  // Return Undef for zero-sized type.
3836  if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3837  return UndefInt8;
3838 
3839  Constant *C = dyn_cast<Constant>(V);
3840  if (!C) {
3841  // Conceptually, we could handle things like:
3842  // %a = zext i8 %X to i16
3843  // %b = shl i16 %a, 8
3844  // %c = or i16 %a, %b
3845  // but until there is an example that actually needs this, it doesn't seem
3846  // worth worrying about.
3847  return nullptr;
3848  }
3849 
3850  // Handle 'null' ConstantArrayZero etc.
3851  if (C->isNullValue())
3852  return Constant::getNullValue(Type::getInt8Ty(Ctx));
3853 
3854  // Constant floating-point values can be handled as integer values if the
3855  // corresponding integer value is "byteable". An important case is 0.0.
3856  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3857  Type *Ty = nullptr;
3858  if (CFP->getType()->isHalfTy())
3859  Ty = Type::getInt16Ty(Ctx);
3860  else if (CFP->getType()->isFloatTy())
3861  Ty = Type::getInt32Ty(Ctx);
3862  else if (CFP->getType()->isDoubleTy())
3863  Ty = Type::getInt64Ty(Ctx);
3864  // Don't handle long double formats, which have strange constraints.
3865  return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3866  : nullptr;
3867  }
3868 
3869  // We can handle constant integers that are multiple of 8 bits.
3870  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3871  if (CI->getBitWidth() % 8 == 0) {
3872  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3873  if (!CI->getValue().isSplat(8))
3874  return nullptr;
3875  return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3876  }
3877  }
3878 
3879  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3880  if (CE->getOpcode() == Instruction::IntToPtr) {
3881  if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3882  unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3883  return isBytewiseValue(
3884  ConstantExpr::getIntegerCast(CE->getOperand(0),
3885  Type::getIntNTy(Ctx, BitWidth), false),
3886  DL);
3887  }
3888  }
3889  }
3890 
3891  auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3892  if (LHS == RHS)
3893  return LHS;
3894  if (!LHS || !RHS)
3895  return nullptr;
3896  if (LHS == UndefInt8)
3897  return RHS;
3898  if (RHS == UndefInt8)
3899  return LHS;
3900  return nullptr;
3901  };
3902 
3903  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3904  Value *Val = UndefInt8;
3905  for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3906  if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3907  return nullptr;
3908  return Val;
3909  }
3910 
3911  if (isa<ConstantAggregate>(C)) {
3912  Value *Val = UndefInt8;
3913  for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3914  if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3915  return nullptr;
3916  return Val;
3917  }
3918 
3919  // Don't try to handle the handful of other constants.
3920  return nullptr;
3921 }
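// Usage sketch (editorial; DL is the module's DataLayout):
//   isBytewiseValue(i32 0xAAAAAAAA, DL) --> i8 0xAA   (splat of one byte)
//   isBytewiseValue(double 0.0, DL)     --> i8 0      (null value)
//   isBytewiseValue(i32 0x01020304, DL) --> nullptr   (bytes differ)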
3922 
3923 // This is the recursive version of BuildSubAggregate. It takes a few different
3924 // arguments. Idxs is the index within the nested struct From that we are
3925 // looking at now (which is of type IndexedType). IdxSkip is the number of
3926 // indices from Idxs that should be left out when inserting into the resulting
3927 // struct. To is the result struct built so far, new insertvalue instructions
3928 // build on that.
3929 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3930  SmallVectorImpl<unsigned> &Idxs,
3931  unsigned IdxSkip,
3932  Instruction *InsertBefore) {
3933  StructType *STy = dyn_cast<StructType>(IndexedType);
3934  if (STy) {
3935  // Save the original To argument so we can modify it
3936  Value *OrigTo = To;
3937  // General case, the type indexed by Idxs is a struct
3938  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3939  // Process each struct element recursively
3940  Idxs.push_back(i);
3941  Value *PrevTo = To;
3942  To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3943  InsertBefore);
3944  Idxs.pop_back();
3945  if (!To) {
3946  // Couldn't find any inserted value for this index? Cleanup
3947  while (PrevTo != OrigTo) {
3948  InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3949  PrevTo = Del->getAggregateOperand();
3950  Del->eraseFromParent();
3951  }
3952  // Stop processing elements
3953  break;
3954  }
3955  }
3956  // If we successfully found a value for each of our subaggregates
3957  if (To)
3958  return To;
3959  }
3960  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3961  // the struct's elements had a value that was inserted directly. In the latter
3962  // case, perhaps we can't determine each of the subelements individually, but
3963  // we might be able to find the complete struct somewhere.
3964 
3965  // Find the value that is at that particular spot
3966  Value *V = FindInsertedValue(From, Idxs);
3967 
3968  if (!V)
3969  return nullptr;
3970 
3971  // Insert the value in the new (sub) aggregate
3972  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3973  "tmp", InsertBefore);
3974 }
3975 
3976 // This helper takes a nested struct and extracts a part of it (which is again a
3977 // struct) into a new value. For example, given the struct:
3978 // { a, { b, { c, d }, e } }
3979 // and the indices "1, 1" this returns
3980 // { c, d }.
3981 //
3982 // It does this by inserting an insertvalue for each element in the resulting
3983 // struct, as opposed to just inserting a single struct. This will only work if
3984 // each of the elements of the substruct are known (ie, inserted into From by an
3985 // insertvalue instruction somewhere).
3986 //
3987 // All inserted insertvalue instructions are inserted before InsertBefore
3988 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3989  Instruction *InsertBefore) {
3990  assert(InsertBefore && "Must have someplace to insert!");
3991  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3992  idx_range);
3993  Value *To = UndefValue::get(IndexedType);
3994  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3995  unsigned IdxSkip = Idxs.size();
3996 
3997  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3998 }
3999 
4000 /// Given an aggregate and a sequence of indices, see if the scalar value
4001 /// indexed is already around as a register, for example if it was inserted
4002 /// directly into the aggregate.
4003 ///
4004 /// If InsertBefore is not null, this function will duplicate (modified)
4005 /// insertvalues when a part of a nested struct is extracted.
4006 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4007  Instruction *InsertBefore) {
4008  // Nothing to index? Just return V then (this is useful at the end of our
4009  // recursion).
4010  if (idx_range.empty())
4011  return V;
4012  // We have indices, so V should have an indexable type.
4013  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4014  "Not looking at a struct or array?");
4015  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4016  "Invalid indices for type?");
4017 
4018  if (Constant *C = dyn_cast<Constant>(V)) {
4019  C = C->getAggregateElement(idx_range[0]);
4020  if (!C) return nullptr;
4021  return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4022  }
4023 
4024  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4025  // Loop the indices for the insertvalue instruction in parallel with the
4026  // requested indices
4027  const unsigned *req_idx = idx_range.begin();
4028  for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4029  i != e; ++i, ++req_idx) {
4030  if (req_idx == idx_range.end()) {
4031  // We can't handle this without inserting insertvalues
4032  if (!InsertBefore)
4033  return nullptr;
4034 
4035  // The requested index identifies a part of a nested aggregate. Handle
4036  // this specially. For example,
4037  // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4038  // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4039  // %C = extractvalue {i32, { i32, i32 } } %B, 1
4040  // This can be changed into
4041  // %A = insertvalue {i32, i32 } undef, i32 10, 0
4042  // %C = insertvalue {i32, i32 } %A, i32 11, 1
4043  // which allows the unused 0,0 element from the nested struct to be
4044  // removed.
4045  return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4046  InsertBefore);
4047  }
4048 
4049  // This insert value inserts something else than what we are looking for.
4050  // See if the (aggregate) value inserted into has the value we are
4051  // looking for, then.
4052  if (*req_idx != *i)
4053  return FindInsertedValue(I->getAggregateOperand(), idx_range,
4054  InsertBefore);
4055  }
4056  // If we end up here, the indices of the insertvalue match with those
4057  // requested (though possibly only partially). Now we recursively look at
4058  // the inserted value, passing any remaining indices.
4059  return FindInsertedValue(I->getInsertedValueOperand(),
4060  makeArrayRef(req_idx, idx_range.end()),
4061  InsertBefore);
4062  }
4063 
4064  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4065  // If we're extracting a value from an aggregate that was extracted from
4066  // something else, we can extract from that something else directly instead.
4067  // However, we will need to chain I's indices with the requested indices.
4068 
4069  // Calculate the number of indices required
4070  unsigned size = I->getNumIndices() + idx_range.size();
4071  // Allocate some space to put the new indices in
4072  SmallVector<unsigned, 5> Idxs;
4073  Idxs.reserve(size);
4074  // Add indices from the extract value instruction
4075  Idxs.append(I->idx_begin(), I->idx_end());
4076 
4077  // Add requested indices
4078  Idxs.append(idx_range.begin(), idx_range.end());
4079 
4080  assert(Idxs.size() == size
4081  && "Number of indices added not correct?");
4082 
4083  return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4084  }
4085  // Otherwise, we don't know (such as, extracting from a function return value
4086  // or load instruction)
4087  return nullptr;
4088 }
4089 
4090 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4091  unsigned CharSize) {
4092  // Make sure the GEP has exactly three arguments.
4093  if (GEP->getNumOperands() != 3)
4094  return false;
4095 
4096  // Make sure the index-ee is a pointer to an array of
4097  // \p CharSize integers.
4098  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4099  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4100  return false;
4101 
4102  // Check to make sure that the first operand of the GEP is an integer and
4103  // has value 0 so that we are sure we're indexing into the initializer.
4104  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4105  if (!FirstIdx || !FirstIdx->isZero())
4106  return false;
4107 
4108  return true;
4109 }
4110 
4111 bool llvm::getConstantDataArrayInfo(const Value *V,
4112  ConstantDataArraySlice &Slice,
4113  unsigned ElementSize, uint64_t Offset) {
4114  assert(V);
4115 
4116  // Look through bitcast instructions and geps.
4117  V = V->stripPointerCasts();
4118 
4119  // If the value is a GEP instruction or constant expression, treat it as an
4120  // offset.
4121  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4122  // The GEP operator should be based on a pointer to string constant, and is
4123  // indexing into the string constant.
4124  if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4125  return false;
4126 
4127  // If the second index isn't a ConstantInt, then this is a variable index
4128  // into the array. If this occurs, we can't say anything meaningful about
4129  // the string.
4130  uint64_t StartIdx = 0;
4131  if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4132  StartIdx = CI->getZExtValue();
4133  else
4134  return false;
4135  return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4136  StartIdx + Offset);
4137  }
4138 
4139  // The GEP instruction, constant or instruction, must reference a global
4140  // variable that is a constant and is initialized. The referenced constant
4141  // initializer is the array that we'll use for optimization.
4142  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4143  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4144  return false;
4145 
4146  const ConstantDataArray *Array;
4147  ArrayType *ArrayTy;
4148  if (GV->getInitializer()->isNullValue()) {
4149  Type *GVTy = GV->getValueType();
4150  if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
4151  // A zeroinitializer for the array; there is no ConstantDataArray.
4152  Array = nullptr;
4153  } else {
4154  const DataLayout &DL = GV->getParent()->getDataLayout();
4155  uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4156  uint64_t Length = SizeInBytes / (ElementSize / 8);
4157  if (Length <= Offset)
4158  return false;
4159 
4160  Slice.Array = nullptr;
4161  Slice.Offset = 0;
4162  Slice.Length = Length - Offset;
4163  return true;
4164  }
4165  } else {
4166  // This must be a ConstantDataArray.
4167  Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4168  if (!Array)
4169  return false;
4170  ArrayTy = Array->getType();
4171  }
4172  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4173  return false;
4174 
4175  uint64_t NumElts = ArrayTy->getArrayNumElements();
4176  if (Offset > NumElts)
4177  return false;
4178 
4179  Slice.Array = Array;
4180  Slice.Offset = Offset;
4181  Slice.Length = NumElts - Offset;
4182  return true;
4183 }
4184 
4185 /// This function computes the length of a null-terminated C string pointed to
4186 /// by V. If successful, it returns true and returns the string in Str.
4187 /// If unsuccessful, it returns false.
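// Illustrative example (editorial): given
//   @s = constant [6 x i8] c"hello\00"
// this returns true with Str == "hello" when TrimAtNul is set, and with
// the full 6-byte "hello\0" contents when it is not.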
4188 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4189  uint64_t Offset, bool TrimAtNul) {
4190  ConstantDataArraySlice Slice;
4191  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4192  return false;
4193 
4194  if (Slice.Array == nullptr) {
4195  if (TrimAtNul) {
4196  Str = StringRef();
4197  return true;
4198  }
4199  if (Slice.Length == 1) {
4200  Str = StringRef("", 1);
4201  return true;
4202  }
4203  // We cannot instantiate a StringRef as we do not have an appropriate string
4204  // of 0s at hand.
4205  return false;
4206  }
4207 
4208  // Start out with the entire array in the StringRef.
4209  Str = Slice.Array->getAsString();
4210  // Skip over 'offset' bytes.
4211  Str = Str.substr(Slice.Offset);
4212 
4213  if (TrimAtNul) {
4214  // Trim off the \0 and anything after it. If the array is not nul
4215  // terminated, we just return the whole end of string. The client may know
4216  // some other way that the string is length-bound.
4217  Str = Str.substr(0, Str.find('\0'));
4218  }
4219  return true;
4220 }
4221 
4222 // These next two are very similar to the above, but also look through PHI
4223 // nodes.
4224 // TODO: See if we can integrate these two together.
4225 
4226 /// If we can compute the length of the string pointed to by
4227 /// the specified pointer, return 'len+1'. If we can't, return 0.
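// Illustrative instance (editorial): for a pointer to offset 1 of
//   @s = constant [6 x i8] c"hello\00"
// the nul is found after "ello", so this returns 4 + 1 == 5.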
4228 static uint64_t GetStringLengthH(const Value *V,
4229  SmallPtrSetImpl<const PHINode*> &PHIs,
4230  unsigned CharSize) {
4231  // Look through noop bitcast instructions.
4232  V = V->stripPointerCasts();
4233 
4234  // If this is a PHI node, there are two cases: either we have already seen it
4235  // or we haven't.
4236  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4237  if (!PHIs.insert(PN).second)
4238  return ~0ULL; // already in the set.
4239 
4240  // If it was new, see if all the input strings are the same length.
4241  uint64_t LenSoFar = ~0ULL;
4242  for (Value *IncValue : PN->incoming_values()) {
4243  uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4244  if (Len == 0) return 0; // Unknown length -> unknown.
4245 
4246  if (Len == ~0ULL) continue;
4247 
4248  if (Len != LenSoFar && LenSoFar != ~0ULL)
4249  return 0; // Disagree -> unknown.
4250  LenSoFar = Len;
4251  }
4252 
4253  // Success, all agree.
4254  return LenSoFar;
4255  }
4256 
4257  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4258  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4259  uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4260  if (Len1 == 0) return 0;
4261  uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4262  if (Len2 == 0) return 0;
4263  if (Len1 == ~0ULL) return Len2;
4264  if (Len2 == ~0ULL) return Len1;
4265  if (Len1 != Len2) return 0;
4266  return Len1;
4267  }
4268 
4269  // Otherwise, see if we can read the string.
4270  ConstantDataArraySlice Slice;
4271  if (!getConstantDataArrayInfo(V, Slice, CharSize))
4272  return 0;
4273 
4274  if (Slice.Array == nullptr)
4275  return 1;
4276 
4277  // Search for nul characters
4278  unsigned NullIndex = 0;
4279  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4280  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4281  break;
4282  }
4283 
4284  return NullIndex + 1;
4285 }
4286 
4287 /// If we can compute the length of the string pointed to by
4288 /// the specified pointer, return 'len+1'. If we can't, return 0.
4289 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4290  if (!V->getType()->isPointerTy())
4291  return 0;
4292 
4293  SmallPtrSet<const PHINode*, 32> PHIs;
4294  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4295  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4296  // an empty string as a length.
4297  return Len == ~0ULL ? 1 : Len;
4298 }
4299 
4300 const Value *
4301 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4302  bool MustPreserveNullness) {
4303  assert(Call &&
4304  "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4305  if (const Value *RV = Call->getReturnedArgOperand())
4306  return RV;
4307  // This can be used only as an aliasing property.
4308  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4309  Call, MustPreserveNullness))
4310  return Call->getArgOperand(0);
4311  return nullptr;
4312 }
4313 
4314 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4315  const CallBase *Call, bool MustPreserveNullness) {
4316  switch (Call->getIntrinsicID()) {
4317  case Intrinsic::launder_invariant_group:
4318  case Intrinsic::strip_invariant_group:
4319  case Intrinsic::aarch64_irg:
4320  case Intrinsic::aarch64_tagp:
4321  return true;
4322  case Intrinsic::ptrmask:
4323  return !MustPreserveNullness;
4324  default:
4325  return false;
4326  }
4327 }
4328 
4329 /// \p PN defines a loop-variant pointer to an object. Check if the
4330 /// previous iteration of the loop was referring to the same object as \p PN.
4331 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4332  const LoopInfo *LI) {
4333  // Find the loop-defined value.
4334  Loop *L = LI->getLoopFor(PN->getParent());
4335  if (PN->getNumIncomingValues() != 2)
4336  return true;
4337 
4338  // Find the value from previous iteration.
4339  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4340  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4341  PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4342  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4343  return true;
4344 
4345  // If a new pointer is loaded in the loop, the pointer references a different
4346  // object in every iteration. E.g.:
4347  // for (i)
4348  // int *p = a[i];
4349  // ...
4350  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4351  if (!L->isLoopInvariant(Load->getPointerOperand()))
4352  return false;
4353  return true;
4354 }
4355 
4356 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4357  if (!V->getType()->isPointerTy())
4358  return V;
4359  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4360  if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4361  V = GEP->getPointerOperand();
4362  } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4363  Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4364  V = cast<Operator>(V)->getOperand(0);
4365  if (!V->getType()->isPointerTy())
4366  return V;
4367  } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4368  if (GA->isInterposable())
4369  return V;
4370  V = GA->getAliasee();
4371  } else {
4372  if (auto *PHI = dyn_cast<PHINode>(V)) {
4373  // Look through single-arg phi nodes created by LCSSA.
4374  if (PHI->getNumIncomingValues() == 1) {
4375  V = PHI->getIncomingValue(0);
4376  continue;
4377  }
4378  } else if (auto *Call = dyn_cast<CallBase>(V)) {
4379  // CaptureTracking knows about special capturing properties of some
4380  // intrinsics like launder.invariant.group that can't be expressed with
4381  // attributes, such as returning a pointer that aliases an argument.
4382  // Because an analysis may assume that a nocapture pointer is never
4383  // returned from a special intrinsic (the function would otherwise have
4384  // to be marked with a returned attribute), it is crucial to use this
4385  // helper so that we stay in sync with CaptureTracking. Not using it can
4386  // cause miscompilations where two aliasing pointers are assumed not to
4387  // alias.
4388  if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4389  V = RP;
4390  continue;
4391  }
4392  }
4393 
4394  return V;
4395  }
4396  assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4397  }
4398  return V;
4399 }
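// Usage sketch (editorial), peeling casts and GEPs back to the allocation:
//
//   %a = alloca [16 x i8]
//   %p = getelementptr [16 x i8], [16 x i8]* %a, i64 0, i64 4
//   %q = bitcast i8* %p to i32*
//
// getUnderlyingObject(%q) walks %q -> %p -> %a and returns the alloca.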
4400 
4401 void llvm::getUnderlyingObjects(const Value *V,
4402  SmallVectorImpl<const Value *> &Objects,
4403  LoopInfo *LI, unsigned MaxLookup) {
4404  SmallPtrSet<const Value *, 4> Visited;
4405  SmallVector<const Value *, 4> Worklist;
4406  Worklist.push_back(V);
4407  do {
4408  const Value *P = Worklist.pop_back_val();
4409  P = getUnderlyingObject(P, MaxLookup);
4410 
4411  if (!Visited.insert(P).second)
4412  continue;
4413 
4414  if (auto *SI = dyn_cast<SelectInst>(P)) {
4415  Worklist.push_back(SI->getTrueValue());
4416  Worklist.push_back(SI->getFalseValue());
4417  continue;
4418  }
4419 
4420  if (auto *PN = dyn_cast<PHINode>(P)) {
4421  // If this PHI changes the underlying object in every iteration of the
4422  // loop, don't look through it. Consider:
4423  // int **A;
4424  // for (i) {
4425  // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4426  // Curr = A[i];
4427  // *Prev, *Curr;
4428  //
4429  // Prev is tracking Curr one iteration behind so they refer to different
4430  // underlying objects.
4431  if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4432  isSameUnderlyingObjectInLoop(PN, LI))
4433  append_range(Worklist, PN->incoming_values());
4434  continue;
4435  }
4436 
4437  Objects.push_back(P);
4438  } while (!Worklist.empty());
4439 }
4440 
4441 /// This is the function that does the work of looking through basic
4442 /// ptrtoint+arithmetic+inttoptr sequences.
4443 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4444  do {
4445  if (const Operator *U = dyn_cast<Operator>(V)) {
4446  // If we find a ptrtoint, we can transfer control back to the
4447  // regular getUnderlyingObjectFromInt.
4448  if (U->getOpcode() == Instruction::PtrToInt)
4449  return U->getOperand(0);
4450  // If we find an add of a constant, a multiplied value, or a phi, it's
4451  // likely that the other operand will lead us to the base
4452  // object. We don't have to worry about the case where the
4453  // object address is somehow being computed by the multiply,
4454  // because our callers only care when the result is an
4455  // identifiable object.
4456  if (U->getOpcode() != Instruction::Add ||
4457  (!isa<ConstantInt>(U->getOperand(1)) &&
4458  Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4459  !isa<PHINode>(U->getOperand(1))))
4460  return V;
4461  V = U->getOperand(0);
4462  } else {
4463  return V;
4464  }
4465  assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4466  } while (true);
4467 }
4468 
4469 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4470 /// ptrtoint+arithmetic+inttoptr sequences.
4471 /// It returns false if unidentified object is found in getUnderlyingObjects.
4472 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4473  SmallVectorImpl<Value *> &Objects) {
4474  SmallPtrSet<const Value *, 16> Visited;
4475  SmallVector<const Value *, 4> Working(1, V);
4476  do {
4477  V = Working.pop_back_val();
4478 
4479  SmallVector<const Value *, 4> Objs;
4480  getUnderlyingObjects(V, Objs);
4481 
4482  for (const Value *V : Objs) {
4483  if (!Visited.insert(V).second)
4484  continue;
4485  if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4486  const Value *O =
4487  getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4488  if (O->getType()->isPointerTy()) {
4489  Working.push_back(O);
4490  continue;
4491  }
4492  }
4493  // If getUnderlyingObjects fails to find an identifiable object,
4494  // getUnderlyingObjectsForCodeGen also fails for safety.
4495  if (!isIdentifiedObject(V)) {
4496  Objects.clear();
4497  return false;
4498  }
4499  Objects.push_back(const_cast<Value *>(V));
4500  }
4501  } while (!Working.empty());
4502  return true;
4503 }
4504 
4505 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4506  AllocaInst *Result = nullptr;
4507  SmallPtrSet<Value *, 4> Visited;
4508  SmallVector<Value *, 4> Worklist;
4509 
4510  auto AddWork = [&](Value *V) {
4511  if (Visited.insert(V).second)
4512  Worklist.push_back(V);
4513  };
4514 
4515  AddWork(V);
4516  do {
4517  V = Worklist.pop_back_val();
4518  assert(Visited.count(V));
4519 
4520  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4521  if (Result && Result != AI)
4522  return nullptr;
4523  Result = AI;
4524  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4525  AddWork(CI->getOperand(0));
4526  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4527  for (Value *IncValue : PN->incoming_values())
4528  AddWork(IncValue);
4529  } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4530  AddWork(SI->getTrueValue());
4531  AddWork(SI->getFalseValue());
4532  } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4533  if (OffsetZero && !GEP->hasAllZeroIndices())
4534  return nullptr;
4535  AddWork(GEP->getPointerOperand());
4536  } else {
4537  return nullptr;
4538  }
4539  } while (!Worklist.empty());
4540 
4541  return Result;
4542 }
4543 
4544 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4545  const Value *V, bool AllowLifetime, bool AllowDroppable) {
4546  for (const User *U : V->users()) {
4547  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4548  if (!II)
4549  return false;
4550 
4551  if (AllowLifetime && II->isLifetimeStartOrEnd())
4552  continue;
4553 
4554  if (AllowDroppable && II->isDroppable())
4555  continue;
4556 
4557  return false;
4558  }
4559  return true;
4560 }
4561 
4562 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4563  return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4564  V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4565 }
4566 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4567  return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4568  V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4569 }
4570 
4571 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4572  if (!LI.isUnordered())
4573  return true;
4574  const Function &F = *LI.getFunction();
4575  // Speculative load may create a race that did not exist in the source.
4576  return F.hasFnAttribute(Attribute::SanitizeThread) ||
4577  // Speculative load may load data from dirty regions.
4578  F.hasFnAttribute(Attribute::SanitizeAddress) ||
4579  F.hasFnAttribute(Attribute::SanitizeHWAddress);
4580 }
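// Illustrative guard, not part of ValueTracking.cpp: passes that want to
// execute a load speculatively are expected to consult this predicate first.
static bool canSpeculateLoad(const LoadInst &LI) {
  // Sanitizers must only observe loads the original program performed, so
  // speculation is disabled wholesale in instrumented functions.
  return !mustSuppressSpeculation(LI);
}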
4581 
4582 
4583 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4584  const Instruction *CtxI,
4585  const DominatorTree *DT,
4586  const TargetLibraryInfo *TLI) {
4587  const Operator *Inst = dyn_cast<Operator>(V);
4588  if (!Inst)
4589  return false;
4590 
4591  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4592  if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4593  if (C->canTrap())
4594  return false;
4595 
4596  switch (Inst->getOpcode()) {
4597  default:
4598  return true;
4599  case Instruction::UDiv:
4600  case Instruction::URem: {
4601  // x / y is undefined if y == 0.
4602  const APInt *V;
4603  if (match(Inst->getOperand(1), m_APInt(V)))
4604  return *V != 0;
4605  return false;
4606  }
4607  case Instruction::SDiv:
4608  case Instruction::SRem: {
4609  // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
4610  const APInt *Numerator, *Denominator;
4611  if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4612  return false;
4613  // We cannot hoist this division if the denominator is 0.
4614  if (*Denominator == 0)
4615  return false;
4616  // It's safe to hoist if the denominator is neither 0 nor -1.
4617  if (!Denominator->isAllOnesValue())
4618  return true;
4619  // At this point we know that the denominator is -1. It is safe to hoist as
4620  // long as we know that the numerator is not INT_MIN.
4621  if (match(Inst->getOperand(0), m_APInt(Numerator)))
4622  return !Numerator->isMinSignedValue();
4623  // The numerator *might* be MinSignedValue.
4624  return false;
4625  }
4626  case Instruction::Load: {
4627  const LoadInst *LI = cast<LoadInst>(Inst);
4628  if (mustSuppressSpeculation(*LI))
4629  return false;
4630  const DataLayout &DL = LI->getModule()->getDataLayout();
4631  return isDereferenceableAndAlignedPointer(
4632  LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4633  DL, CtxI, DT, TLI);
4634  }
4635  case Instruction::Call: {
4636  auto *CI = cast<const CallInst>(Inst);
4637  const Function *Callee = CI->getCalledFunction();
4638 
4639  // The called function could have undefined behavior or side-effects, even
4640  // if marked readnone nounwind.
4641  return Callee && Callee->isSpeculatable();
4642  }
4643  case Instruction::VAArg:
4644  case Instruction::Alloca:
4645  case Instruction::Invoke:
4646  case Instruction::CallBr:
4647  case Instruction::PHI:
4648  case Instruction::Store:
4649  case Instruction::Ret:
4650  case Instruction::Br:
4651  case Instruction::IndirectBr:
4652  case Instruction::Switch:
4653  case Instruction::Unreachable:
4654  case Instruction::Fence:
4655  case Instruction::AtomicRMW:
4656  case Instruction::AtomicCmpXchg:
4657  case Instruction::LandingPad:
4658  case Instruction::Resume:
4659  case Instruction::CatchSwitch:
4660  case Instruction::CatchPad:
4661  case Instruction::CatchRet:
4662  case Instruction::CleanupPad:
4663  case Instruction::CleanupRet:
4664  return false; // Misc instructions which have effects
4665  }
4666 }
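// Illustrative sketch, not part of ValueTracking.cpp: a LICM-style client
// guarding a hoist with this predicate. `InsertPt` is a hypothetical insertion
// point (e.g. a preheader terminator); the sketch assumes I's operands already
// dominate InsertPt.
static bool hoistIfSafe(Instruction *I, Instruction *InsertPt,
                        const DominatorTree *DT) {
  if (!isSafeToSpeculativelyExecute(I, InsertPt, DT))
    return false; // executing I unconditionally could trap, divide by 0, ...
  I->moveBefore(InsertPt);
  return true;
}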
4667 
4668 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4669  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4670 }
4671 
4672 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4673 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4674  switch (OR) {
4675  case ConstantRange::OverflowResult::MayOverflow:
4676  return OverflowResult::MayOverflow;
4677  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4678  return OverflowResult::AlwaysOverflowsLow;
4679  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4680  return OverflowResult::AlwaysOverflowsHigh;
4681  case ConstantRange::OverflowResult::NeverOverflows:
4682  return OverflowResult::NeverOverflows;
4683  }
4684  llvm_unreachable("Unknown OverflowResult");
4685 }
4686 
4687 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4688 static ConstantRange computeConstantRangeIncludingKnownBits(
4689  const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4690  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4691  OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4692  KnownBits Known = computeKnownBits(
4693  V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4694  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4695  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4696  ConstantRange::PreferredRangeType RangeType =
4697  ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4698  return CR1.intersectWith(CR2, RangeType);
4699 }
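// Worked example, not part of ValueTracking.cpp, of why the intersection is
// profitable: known bits and computeConstantRange() capture different facts,
// and either one can be the tighter of the two.
static ConstantRange exampleIntersection() {
  KnownBits Known(8);
  Known.Zero.setHighBits(4); // high nibble known zero => unsigned [0, 16)
  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
  ConstantRange CR2(APInt(8, 10), APInt(8, 100)); // e.g. from !range metadata
  // The result is [10, 16), strictly tighter than either input alone.
  return CR1.intersectWith(CR2, ConstantRange::Unsigned);
}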
4700 
4701 OverflowResult llvm::computeOverflowForUnsignedMul(
4702  const Value *LHS, const Value *RHS, const DataLayout &DL,
4703  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4704  bool UseInstrInfo) {
4705  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4706  nullptr, UseInstrInfo);
4707  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4708  nullptr, UseInstrInfo);
4709  ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4710  ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4711  return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4712 }
4713 
4714 OverflowResult
4715 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4716  const DataLayout &DL, AssumptionCache *AC,
4717  const Instruction *CxtI,
4718  const DominatorTree *DT, bool UseInstrInfo) {
4719  // Multiplying an n-significant-bit value by an m-significant-bit value
4720  // yields a result with at most n + m significant bits. If the total
4721  // number of significant bits does not exceed the result bit width
4722  // (minus 1), there is no overflow. This means that if the operands have
4723  // enough leading sign bits, we can guarantee the result does not overflow.
4724  // Ref: "Hacker's Delight" by Henry Warren
4725  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4726 
4727  // Note that underestimating the number of sign bits gives a more
4728  // conservative answer.
4729  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4730  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4731 
4732  // First handle the easy case: if we have enough sign bits there's
4733  // definitely no overflow.
4734  if (SignBits > BitWidth + 1)
4735  return OverflowResult::NeverOverflows;
4736 
4737  // There are two ambiguous cases where there can be no overflow:
4738  // SignBits == BitWidth + 1 and
4739  // SignBits == BitWidth
4740  // The second case is difficult to check, so we handle only the
4741  // first case.
4742  if (SignBits == BitWidth + 1) {
4743  // It overflows only when both arguments are negative and the true
4744  // product is exactly the minimum negative number.
4745  // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4746  // For simplicity we just check if at least one side is not negative.
4747  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4748  nullptr, UseInstrInfo);
4749  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4750  nullptr, UseInstrInfo);
4751  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4752  return OverflowResult::NeverOverflows;
4753  }
4754  return OverflowResult::MayOverflow;
4755 }
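// Worked instance of the ambiguous case above (illustrative, not part of
// ValueTracking.cpp), reproducing the i16 example from the comment in plain
// C++ arithmetic:
static void signedMulOverflowExample() {
  int16_t A = (int16_t)0xff00;          // -256: 8 sign bits
  int16_t B = (int16_t)0xff80;          // -128: 9 sign bits => SignBits == 17
  int32_t TrueProduct = (int32_t)A * B; // 32768, one past INT16_MAX
  // Truncating back to i16 wraps to INT16_MIN: the multiply overflows even
  // though SignBits == BitWidth + 1, because both operands are negative.
  int16_t Wrapped = (int16_t)TrueProduct; // 0x8000 == -32768
  (void)Wrapped;
}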
4756 
4757 OverflowResult llvm::computeOverflowForUnsignedAdd(
4758  const Value *LHS, const Value *RHS, const DataLayout &DL,
4759  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4760  bool UseInstrInfo) {
4761  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4762  LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4763  nullptr, UseInstrInfo);
4764  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4765  RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4766  nullptr, UseInstrInfo);
4767  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4768 }
4769 
4770 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4771  const Value *RHS,
4772  const AddOperator *Add,
4773  const DataLayout &DL,
4774  AssumptionCache *AC,
4775  const Instruction *CxtI,
4776  const DominatorTree *DT) {
4777  if (Add && Add->hasNoSignedWrap()) {
4778  return OverflowResult::NeverOverflows;
4779  }
4780 
4781  // If LHS and RHS each have at least two sign bits, the addition will look
4782  // like
4783  //
4784  // XX..... +
4785  // YY.....
4786  //
4787  // If the carry into the most significant position is 0, X and Y can't both
4788  // be 1 and therefore the carry out of the addition is also 0.
4789  //
4790  // If the carry into the most significant position is 1, X and Y can't both
4791  // be 0 and therefore the carry out of the addition is also 1.
4792  //
4793  // Since the carry into the most significant position is always equal to
4794  // the carry out of the addition, there is no signed overflow.
4795  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4796  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4797  return OverflowResult::NeverOverflows;
4798 
4799  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4800  LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4801  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4802  RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4803  OverflowResult OR =
4804  mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4805  if (OR != OverflowResult::MayOverflow)
4806  return OR;
4807 
4808  // The remaining code needs Add to be available. Return early if it is not.
4809  if (!Add)
4810  return OverflowResult::MayOverflow;
4811 
4812  // If the sign of Add is the same as at least one of the operands, this add
4813  // CANNOT overflow. If this can be determined from the known bits of the
4814  // operands the above signedAddMayOverflow() check will have already done so.
4815  // The only other way to improve on the known bits is from an assumption, so
4816  // call computeKnownBitsFromAssume() directly.
4817  bool LHSOrRHSKnownNonNegative =
4818  (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4819  bool LHSOrRHSKnownNegative =
4820  (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4821  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4822  KnownBits AddKnown(LHSRange.getBitWidth());
4823  computeKnownBitsFromAssume(
4824  Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4825  if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4826  (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4827  return OverflowResult::NeverOverflows;
4828  }
4829 
4830  return OverflowResult::MayOverflow;
4831 }
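// Exhaustive check of the two-sign-bit argument above (illustrative, not part
// of ValueTracking.cpp): i8 operands with at least two sign bits lie in
// [-64, 63], so their sum lies in [-128, 126] and never wraps.
static void signedAddNoWrapCheck() {
  for (int X = -64; X <= 63; ++X)
    for (int Y = -64; Y <= 63; ++Y)
      assert(X + Y >= -128 && X + Y <= 126 && "sum always fits in i8");
}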
4832 
4833 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4834  const Value *RHS,
4835  const DataLayout &DL,
4836  AssumptionCache *AC,
4837  const Instruction *CxtI,
4838  const DominatorTree *DT) {
4839  // Checking for conditions implied by dominating conditions may be expensive.
4840  // Limit it to usub_with_overflow calls for now.
4841  if (match(CxtI,
4842  m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4843  if (auto C =
4844  isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4845  if (*C)
4846  return OverflowResult::NeverOverflows;
4847  return OverflowResult::AlwaysOverflowsLow;
4848  }
4849  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4850  LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4851  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4852  RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4853  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4854 }
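// Illustrative sketch, not part of ValueTracking.cpp: how a simplification
// client might act on the answer for `A - B`.
static bool subNeverWraps(const Value *A, const Value *B, const DataLayout &DL,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  OverflowResult OR = computeOverflowForUnsignedSub(A, B, DL, AC, CxtI, DT);
  // NeverOverflows lets a caller fold usub.with.overflow's i1 result to
  // false; AlwaysOverflowsLow would fold it to true instead.
  return OR == OverflowResult::NeverOverflows;
}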
4855 
4856 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4857  const Value *RHS,
4858  const DataLayout &DL,
4859  AssumptionCache *AC,
4860  const Instruction *CxtI,
4861  const DominatorTree *DT) {
4862  // If LHS and RHS each have at least two sign bits, the subtraction
4863  // cannot overflow.
4864  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4865  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4866  return OverflowResult::NeverOverflows;
4867 
4868  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4869  LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4870  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4871  RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4872  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4873 }
4874 
4875 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4876  const DominatorTree &DT) {
4877  SmallVector<const BranchInst *, 2> GuardingBranches;
4878  SmallVector<const ExtractValueInst *, 2> Results;
4879 
4880  for (const User *U : WO->users()) {
4881  if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4882  assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4883 
4884  if (EVI->getIndices()[0] == 0)
4885  Results.push_back(EVI);
4886  else {
4887  assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4888 
4889  for (const auto *U : EVI->users())
4890  if (const auto *B = dyn_cast<BranchInst>(U)) {
4891  assert(B->isConditional() && "How else is it using an i1?");
4892  GuardingBranches.push_back(B);
4893  }
4894  }
4895  } else {
4896  // We are using the aggregate directly in a way we don't want to analyze
4897  // here (storing it to a global, say).
4898  return false;
4899  }
4900  }
4901 
4902  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4903  BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4904  if (!NoWrapEdge.isSingleEdge())
4905  return false;
4906 
4907  // Check if all users of the add are provably no-wrap.
4908  for (const auto *Result : Results) {
4909  // If the extractvalue itself is not executed on overflow, then we don't
4910  // need to check each use separately, since domination is transitive.
4911  if (DT.dominates(NoWrapEdge, Result->getParent()))
4912  continue;
4913 
4914  for (auto &RU : Result->uses())
4915  if (!DT.dominates(NoWrapEdge, RU))
4916  return false;
4917  }
4918 
4919  return true;
4920  };
4921 
4922  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4923 }
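// The guarded shape recognized above, sketched for illustration (not part of
// ValueTracking.cpp). In IR:
//
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %val = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %cont
//
// Every use of %val reachable only via %cont (successor 1, the no-wrap edge)
// is dominated by the branch, so a client may treat the math result as
// non-wrapping:
static bool mathResultIsNoWrap(const Instruction &I, const DominatorTree &DT) {
  if (const auto *WO = dyn_cast<WithOverflowInst>(&I))
    return isOverflowIntrinsicNoWrap(WO, DT);
  return false;
}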
4924 
4925 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4926  // See whether Op has flags that may create poison.
4927  if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4928  if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4929  return true;
4930  }
4931  if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4932  if (ExactOp->isExact())
4933  return true;
4934  if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4935