1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/LazyValueInfo.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/Analysis/AssumptionCache.h"
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/TargetLibraryInfo.h"
21#include "llvm/Analysis/ValueLattice.h"
22#include "llvm/Analysis/ValueTracking.h"
23#include "llvm/IR/AssemblyAnnotationWriter.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/ConstantRange.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Instructions.h"
31#include "llvm/IR/IntrinsicInst.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/LLVMContext.h"
34#include "llvm/IR/Module.h"
35#include "llvm/IR/PatternMatch.h"
36#include "llvm/IR/ValueHandle.h"
37#include "llvm/InitializePasses.h"
38#include "llvm/Support/Debug.h"
39#include "llvm/Support/FormattedStream.h"
40#include "llvm/Support/KnownBits.h"
41#include "llvm/Support/raw_ostream.h"
42#include <optional>
43using namespace llvm;
44using namespace PatternMatch;
45
46#define DEBUG_TYPE "lazy-value-info"
47
48// This is the number of worklist items we will process to try to discover an
49// answer for a given value.
50static const unsigned MaxProcessedPerValue = 500;
51
52char LazyValueInfoWrapperPass::ID = 0;
53LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
54 initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
55}
56INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
57 "Lazy Value Information Analysis", false, true)
58INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
59INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
60INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
61 "Lazy Value Information Analysis", false, true)
62
63namespace llvm {
64FunctionPass *createLazyValueInfoPass() {
65 return new LazyValueInfoWrapperPass();
66}
67} // namespace llvm
68
69AnalysisKey LazyValueAnalysis::Key;
70
71/// Returns true if this lattice value represents at most one possible value.
72/// This is as precise as any lattice value can get while still representing
73/// reachable code.
74static bool hasSingleValue(const ValueLatticeElement &Val) {
75 if (Val.isConstantRange() &&
76 Val.getConstantRange().isSingleElement())
77 // Integer constants are single element ranges
78 return true;
79 if (Val.isConstant())
80 // Non integer constants
81 return true;
82 return false;
83}
84
85/// Combine two sets of facts about the same value into a single set of
86/// facts. Note that this method is not suitable for merging facts along
87/// different paths in a CFG; that's what the mergeIn function is for. This
88/// is for merging facts gathered about the same value at the same location
89/// through two independent means.
90/// Notes:
91/// * This method does not promise to return the most precise possible lattice
92/// value implied by A and B. It is allowed to return any lattice element
93/// which is at least as strong as *either* A or B (unless our facts
94/// conflict, see below).
95/// * Due to unreachable code, the intersection of two lattice values could be
96/// contradictory. If this happens, we return some valid lattice value so as
97/// not to confuse the rest of LVI. Ideally, we'd always return Undefined, but
98/// we do not make this guarantee. TODO: This would be a useful enhancement.
99static ValueLatticeElement intersect(const ValueLatticeElement &A,
100 const ValueLatticeElement &B) {
101 // Undefined is the strongest state. It means the value is known to be along
102 // an unreachable path.
103 if (A.isUnknown())
104 return A;
105 if (B.isUnknown())
106 return B;
107
108 // If we gave up for one, but got a useable fact from the other, use it.
109 if (A.isOverdefined())
110 return B;
111 if (B.isOverdefined())
112 return A;
113
114 // Can't get any more precise than constants.
115 if (hasSingleValue(A))
116 return A;
117 if (hasSingleValue(B))
118 return B;
119
120 // Could be either constant range or not constant here.
121 if (!A.isConstantRange() || !B.isConstantRange()) {
122 // TODO: Arbitrary choice, could be improved
123 return A;
124 }
125
126 // Intersect two constant ranges
127 ConstantRange Range =
128 A.getConstantRange().intersectWith(B.getConstantRange());
129 // Note: An empty range is implicitly converted to unknown or undef depending
130 // on MayIncludeUndef internally.
131 return ValueLatticeElement::getRange(
132 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
133 B.isConstantRangeIncludingUndef());
134}
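// Illustrative example (not part of the original source): if the block value
// of %x is the range [0, 10) and an @llvm.assume establishes %x u>= 5 (the
// range [5, UINT_MAX]), intersect() may return the tighter range [5, 10):
//
//   %x = ...                        ; block value: [0, 10)
//   %c = icmp uge i32 %x, 5
//   call void @llvm.assume(i1 %c)   ; assumed fact: [5, UINT_MAX]
//   ; intersect([0, 10), [5, UINT_MAX]) == [5, 10)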
135
136//===----------------------------------------------------------------------===//
137// LazyValueInfoCache Decl
138//===----------------------------------------------------------------------===//
139
140namespace {
141 /// A callback value handle updates the cache when values are erased.
142 class LazyValueInfoCache;
143 struct LVIValueHandle final : public CallbackVH {
144 LazyValueInfoCache *Parent;
145
146 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
147 : CallbackVH(V), Parent(P) { }
148
149 void deleted() override;
150 void allUsesReplacedWith(Value *V) override {
151 deleted();
152 }
153 };
154} // end anonymous namespace
155
156namespace {
157using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
158
159/// This is the cache kept by LazyValueInfo which
160/// maintains information about queries across the clients' queries.
161class LazyValueInfoCache {
162 /// This is all of the cached information for one basic block. It contains
163 /// the per-value lattice elements, as well as a separate set for
164 /// overdefined values to reduce memory usage. Additionally pointers
165 /// dereferenced in the block are cached for nullability queries.
166 struct BlockCacheEntry {
167 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
168 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
169 // std::nullopt indicates that the nonnull pointers for this basic block
170 // have not been computed yet.
171 std::optional<NonNullPointerSet> NonNullPointers;
172 };
173
174 /// Cached information per basic block.
175 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
176 BlockCache;
177 /// Set of value handles used to erase values from the cache on deletion.
178 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
179
180 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
181 auto It = BlockCache.find_as(BB);
182 if (It == BlockCache.end())
183 return nullptr;
184 return It->second.get();
185 }
186
187 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
188 auto It = BlockCache.find_as(BB);
189 if (It == BlockCache.end())
190 It = BlockCache.insert({BB, std::make_unique<BlockCacheEntry>()}).first;
191
192 return It->second.get();
193 }
194
195 void addValueHandle(Value *Val) {
196 auto HandleIt = ValueHandles.find_as(Val);
197 if (HandleIt == ValueHandles.end())
198 ValueHandles.insert({Val, this});
199 }
200
201public:
202 void insertResult(Value *Val, BasicBlock *BB,
203 const ValueLatticeElement &Result) {
204 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
205
206 // Insert over-defined values into their own cache to reduce memory
207 // overhead.
208 if (Result.isOverdefined())
209 Entry->OverDefined.insert(Val);
210 else
211 Entry->LatticeElements.insert({Val, Result});
212
213 addValueHandle(Val);
214 }
215
216 std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
217 BasicBlock *BB) const {
218 const BlockCacheEntry *Entry = getBlockEntry(BB);
219 if (!Entry)
220 return std::nullopt;
221
222 if (Entry->OverDefined.count(V))
223 return ValueLatticeElement::getOverdefined();
224
225 auto LatticeIt = Entry->LatticeElements.find_as(V);
226 if (LatticeIt == Entry->LatticeElements.end())
227 return std::nullopt;
228
229 return LatticeIt->second;
230 }
231
232 bool
233 isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
234 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
235 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
236 if (!Entry->NonNullPointers) {
237 Entry->NonNullPointers = InitFn(BB);
238 for (Value *V : *Entry->NonNullPointers)
239 addValueHandle(V);
240 }
241
242 return Entry->NonNullPointers->count(V);
243 }
244
245 /// clear - Empty the cache.
246 void clear() {
247 BlockCache.clear();
248 ValueHandles.clear();
249 }
250
251 /// Inform the cache that a given value has been deleted.
252 void eraseValue(Value *V);
253
254 /// This is part of the update interface to inform the cache
255 /// that a block has been deleted.
256 void eraseBlock(BasicBlock *BB);
257
258 /// Updates the cache to remove any influence an overdefined value in
259 /// OldSucc might have (unless also overdefined in NewSucc). This just
260 /// flushes elements from the cache and does not add any.
261 void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
262};
263} // namespace
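// Usage sketch (illustrative only, not part of the original source): the
// solver keys this cache on (Value, BasicBlock) pairs, with overdefined
// results kept in a separate per-block set to save memory:
//
//   LazyValueInfoCache Cache;
//   Cache.insertResult(V, BB, ValueLatticeElement::getOverdefined());
//   // A later lookup returns the cached element, or std::nullopt on a miss.
//   std::optional<ValueLatticeElement> LV = Cache.getCachedValueInfo(V, BB);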
264
265void LazyValueInfoCache::eraseValue(Value *V) {
266 for (auto &Pair : BlockCache) {
267 Pair.second->LatticeElements.erase(V);
268 Pair.second->OverDefined.erase(V);
269 if (Pair.second->NonNullPointers)
270 Pair.second->NonNullPointers->erase(V);
271 }
272
273 auto HandleIt = ValueHandles.find_as(V);
274 if (HandleIt != ValueHandles.end())
275 ValueHandles.erase(HandleIt);
276}
277
278void LVIValueHandle::deleted() {
279 // This erasure deallocates *this, so it MUST happen after we're done
280 // using any and all members of *this.
281 Parent->eraseValue(*this);
282}
283
284void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
285 BlockCache.erase(BB);
286}
287
288void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
289 BasicBlock *NewSucc) {
290 // When an edge in the graph has been threaded, values that we could not
291 // determine a value for before (i.e. were marked overdefined) may be
292 // possible to solve now. We do NOT try to proactively update these values.
293 // Instead, we clear their entries from the cache, and allow lazy updating to
294 // recompute them when needed.
295
296 // The updating process is fairly simple: we need to drop cached info
297 // for all values that were marked overdefined in OldSucc, and for those same
298 // values in any successor of OldSucc (except NewSucc) in which they were
299 // also marked overdefined.
300 std::vector<BasicBlock*> worklist;
301 worklist.push_back(OldSucc);
302
303 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
304 if (!Entry || Entry->OverDefined.empty())
305 return; // Nothing to process here.
306 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
307 Entry->OverDefined.end());
308
309 // Use a worklist to perform a depth-first search of OldSucc's successors.
310 // NOTE: We do not need a visited list since any blocks we have already
311 // visited will have had their overdefined markers cleared already, and we
312 // thus won't loop to their successors.
313 while (!worklist.empty()) {
314 BasicBlock *ToUpdate = worklist.back();
315 worklist.pop_back();
316
317 // Skip blocks only accessible through NewSucc.
318 if (ToUpdate == NewSucc) continue;
319
320 // If a value was marked overdefined in OldSucc, and is here too...
321 auto OI = BlockCache.find_as(ToUpdate);
322 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
323 continue;
324 auto &ValueSet = OI->second->OverDefined;
325
326 bool changed = false;
327 for (Value *V : ValsToClear) {
328 if (!ValueSet.erase(V))
329 continue;
330
331 // If we removed anything, then we potentially need to update
332 // blocks successors too.
333 changed = true;
334 }
335
336 if (!changed) continue;
337
338 llvm::append_range(worklist, successors(ToUpdate));
339 }
340}
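// Illustrative example (not part of the original source): after the edge
// PredBB -> OldSucc is threaded to PredBB -> NewSucc, a value that was cached
// as overdefined in OldSucc (and in OldSucc's transitive successors other
// than NewSucc) may now be solvable, so those entries are flushed and
// recomputed lazily on the next query:
//
//   PredBB ---> OldSucc ---> S1 ---> S2   ; overdefined entries flushed in
//        \                             ;   OldSucc, S1 and S2
//         `--> NewSucc                  ; entries in NewSucc are kept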
341
342namespace llvm {
343namespace {
344/// An assembly annotator class to print LazyValueCache information in
345/// comments.
346class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
347 LazyValueInfoImpl *LVIImpl;
348 // While analyzing which blocks we can solve values for, we need the dominator
349 // information.
350 DominatorTree &DT;
351
352public:
353 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
354 : LVIImpl(L), DT(DTree) {}
355
356 void emitBasicBlockStartAnnot(const BasicBlock *BB,
357 formatted_raw_ostream &OS) override;
358
359 void emitInstructionAnnot(const Instruction *I,
360 formatted_raw_ostream &OS) override;
361};
362} // namespace
363// The actual implementation of the lazy analysis and update. Note that the
364// inheritance from LazyValueInfoCache is intended to be temporary while
365// splitting the code and then transitioning to a has-a relationship.
366class LazyValueInfoImpl {
367
368 /// Cached results from previous queries
369 LazyValueInfoCache TheCache;
370
371 /// This stack holds the state of the value solver during a query.
372 /// It basically emulates the callstack of the naive
373 /// recursive value lookup process.
374 SmallVector<std::pair<BasicBlock *, Value *>, 8> BlockValueStack;
375
376 /// Keeps track of which block-value pairs are in BlockValueStack.
377 DenseSet<std::pair<BasicBlock *, Value *>> BlockValueSet;
378
379 /// Push BV onto BlockValueStack unless it's already in there.
380 /// Returns true on success.
381 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
382 if (!BlockValueSet.insert(BV).second)
383 return false; // It's already in the stack.
384
385 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
386 << BV.first->getName() << "\n");
387 BlockValueStack.push_back(BV);
388 return true;
389 }
390
391 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
392 const DataLayout &DL; ///< A mandatory DataLayout
393
394 /// Declaration of the llvm.experimental.guard() intrinsic,
395 /// if it exists in the module.
396 Function *GuardDecl;
397
398 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
399 Instruction *CxtI);
400 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
401 BasicBlock *T,
402 Instruction *CxtI = nullptr);
403
404 // These methods process one work item and may add more. A false value
405 // returned means that the work item was not completely processed and must
406 // be revisited after going through the new items.
407 bool solveBlockValue(Value *Val, BasicBlock *BB);
408 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
409 BasicBlock *BB);
410 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
411 BasicBlock *BB);
412 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
413 BasicBlock *BB);
414 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
415 BasicBlock *BB);
416 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
417 BasicBlock *BB);
418 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
419 Instruction *I, BasicBlock *BB,
420 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
421 OpFn);
422 std::optional<ValueLatticeElement>
423 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
424 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
425 BasicBlock *BB);
426 std::optional<ValueLatticeElement>
427 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
428 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
429 BasicBlock *BB);
430 std::optional<ValueLatticeElement>
431 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
432 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
433 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
434 ValueLatticeElement &BBLV,
435 Instruction *BBI);
436
437 void solve();
438
439 // For the following methods, if UseBlockValue is true, the function may
440 // push additional values to the worklist and return nullopt. If
441 // UseBlockValue is false, it will never return nullopt.
442
443 std::optional<ValueLatticeElement>
444 getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
445 const APInt &Offset, Instruction *CxtI,
446 bool UseBlockValue);
447
448 std::optional<ValueLatticeElement>
449 getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
450 bool UseBlockValue);
451
452 std::optional<ValueLatticeElement>
453 getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
454 bool UseBlockValue, unsigned Depth = 0);
455
456 std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
457 BasicBlock *BBFrom,
458 BasicBlock *BBTo,
459 bool UseBlockValue);
460
461public:
462 /// This is the query interface to determine the lattice value for the
463 /// specified Value* at the context instruction (if specified) or at the
464 /// start of the block.
465 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
466 Instruction *CxtI = nullptr);
467
468 /// This is the query interface to determine the lattice value for the
469 /// specified Value* at the specified instruction using only information
470 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
471 /// recursive query is performed.
472 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
473
474 /// This is the query interface to determine the lattice
475 /// value for the specified Value* that is true on the specified edge.
476 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
477 BasicBlock *ToBB,
478 Instruction *CxtI = nullptr);
479
480 ValueLatticeElement getValueAtUse(const Use &U);
481
482 /// Complete flush all previously computed values
483 void clear() {
484 TheCache.clear();
485 }
486
487 /// Printing the LazyValueInfo Analysis.
488 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
489 LazyValueInfoAnnotatedWriter Writer(this, DTree);
490 F.print(OS, &Writer);
491 }
492
493 /// This is part of the update interface to remove information related to this
494 /// value from the cache.
495 void forgetValue(Value *V) { TheCache.eraseValue(V); }
496
497 /// This is part of the update interface to inform the cache
498 /// that a block has been deleted.
499 void eraseBlock(BasicBlock *BB) {
500 TheCache.eraseBlock(BB);
501 }
502
503 /// This is the update interface to inform the cache that an edge from
504 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
505 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
506
507 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
508 Function *GuardDecl)
509 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
510};
511} // namespace llvm
512
513void LazyValueInfoImpl::solve() {
514 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
515 BlockValueStack.begin(), BlockValueStack.end());
516
517 unsigned processedCount = 0;
518 while (!BlockValueStack.empty()) {
519 processedCount++;
520 // Abort if we have to process too many values to get a result for this one.
521 // Because of the design of the overdefined cache currently being per-block
522 // to avoid naming-related issues (IE it wants to try to give different
523 // results for the same name in different blocks), overdefined results don't
524 // get cached globally, which in turn means we will often try to rediscover
525 // the same overdefined result again and again. Once something like
526 // PredicateInfo is used in LVI or CVP, we should be able to make the
527 // overdefined cache global, and remove this throttle.
528 if (processedCount > MaxProcessedPerValue) {
529 LLVM_DEBUG(
530 dbgs() << "Giving up on stack because we are getting too deep\n");
531 // Fill in the original values
532 while (!StartingStack.empty()) {
533 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
534 TheCache.insertResult(e.second, e.first,
535 ValueLatticeElement::getOverdefined());
536 StartingStack.pop_back();
537 }
538 BlockValueSet.clear();
539 BlockValueStack.clear();
540 return;
541 }
542 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
543 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
544 unsigned StackSize = BlockValueStack.size();
545 (void) StackSize;
546
547 if (solveBlockValue(e.second, e.first)) {
548 // The work item was completely processed.
549 assert(BlockValueStack.size() == StackSize &&
550 BlockValueStack.back() == e && "Nothing should have been pushed!");
551#ifndef NDEBUG
552 std::optional<ValueLatticeElement> BBLV =
553 TheCache.getCachedValueInfo(e.second, e.first);
554 assert(BBLV && "Result should be in cache!");
555 LLVM_DEBUG(
556 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
557 << *BBLV << "\n");
558#endif
559
560 BlockValueStack.pop_back();
561 BlockValueSet.erase(e);
562 } else {
563 // More work needs to be done before revisiting.
564 assert(BlockValueStack.size() == StackSize + 1 &&
565 "Exactly one element should have been pushed!");
566 }
567 }
568}
569
570std::optional<ValueLatticeElement>
571LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
572 Instruction *CxtI) {
573 // If already a constant, there is nothing to compute.
574 if (Constant *VC = dyn_cast<Constant>(Val))
575 return ValueLatticeElement::get(VC);
576
577 if (std::optional<ValueLatticeElement> OptLatticeVal =
578 TheCache.getCachedValueInfo(Val, BB)) {
579 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
580 return OptLatticeVal;
581 }
582
583 // We have hit a cycle, assume overdefined.
584 if (!pushBlockValue({ BB, Val }))
585 return ValueLatticeElement::getOverdefined();
586
587 // Yet to be resolved.
588 return std::nullopt;
589}
590
591static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
592 switch (BBI->getOpcode()) {
593 default:
594 break;
595 case Instruction::Call:
596 case Instruction::Invoke:
597 if (std::optional<ConstantRange> Range = cast<CallBase>(BBI)->getRange())
598 return ValueLatticeElement::getRange(*Range);
599 [[fallthrough]];
600 case Instruction::Load:
601 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
602 if (isa<IntegerType>(BBI->getType())) {
603 return ValueLatticeElement::getRange(
604 getConstantRangeFromMetadata(*Ranges));
605 }
606 break;
607 };
608 // Nothing known - will be intersected with other facts
609 return ValueLatticeElement::getOverdefined();
610}
611
612bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
613 assert(!isa<Constant>(Val) && "Value should not be constant");
614 assert(!TheCache.getCachedValueInfo(Val, BB) &&
615 "Value should not be in cache");
616
617 // Hold off inserting this value into the Cache in case we have to return
618 // false and come back later.
619 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
620 if (!Res)
621 // Work pushed, will revisit
622 return false;
623
624 TheCache.insertResult(Val, BB, *Res);
625 return true;
626}
627
628std::optional<ValueLatticeElement>
629LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
630 Instruction *BBI = dyn_cast<Instruction>(Val);
631 if (!BBI || BBI->getParent() != BB)
632 return solveBlockValueNonLocal(Val, BB);
633
634 if (PHINode *PN = dyn_cast<PHINode>(BBI))
635 return solveBlockValuePHINode(PN, BB);
636
637 if (auto *SI = dyn_cast<SelectInst>(BBI))
638 return solveBlockValueSelect(SI, BB);
639
640 // If this value is a nonnull pointer, record its range and bail out. Note
641 // that for all other pointer typed values, we terminate the search at the
642 // definition. We could easily extend this to look through geps, bitcasts,
643 // and the like to prove non-nullness, but it's not clear that's worth it
644 // compile time wise. The context-insensitive value walk done inside
645 // isKnownNonZero gets most of the profitable cases at much less expense.
646 // This does mean that we have a sensitivity to where the defining
647 // instruction is placed, even if it could legally be hoisted much higher.
648 // That is unfortunate.
649 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
650 if (PT && isKnownNonZero(BBI, DL))
651 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
652
653 if (BBI->getType()->isIntOrIntVectorTy()) {
654 if (auto *CI = dyn_cast<CastInst>(BBI))
655 return solveBlockValueCast(CI, BB);
656
657 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
658 return solveBlockValueBinaryOp(BO, BB);
659
660 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
661 return solveBlockValueExtractValue(EVI, BB);
662
663 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
664 return solveBlockValueIntrinsic(II, BB);
665 }
666
667 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
668 << "' - unknown inst def found.\n");
669 return getFromRangeMetadata(BBI);
670}
671
672static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
673 // TODO: Use NullPointerIsDefined instead.
674 if (Ptr->getType()->getPointerAddressSpace() == 0)
675 PtrSet.insert(getUnderlyingObject(Ptr));
676}
677
678static void AddNonNullPointersByInstruction(
679 Instruction *I, NonNullPointerSet &PtrSet) {
680 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
681 AddNonNullPointer(L->getPointerOperand(), PtrSet);
682 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
683 AddNonNullPointer(S->getPointerOperand(), PtrSet);
684 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
685 if (MI->isVolatile()) return;
686
687 // FIXME: check whether it has a valuerange that excludes zero?
688 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
689 if (!Len || Len->isZero()) return;
690
691 AddNonNullPointer(MI->getRawDest(), PtrSet);
692 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
693 AddNonNullPointer(MTI->getRawSource(), PtrSet);
694 }
695}
696
697bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
698 if (NullPointerIsDefined(BB->getParent(),
699 Val->getType()->getPointerAddressSpace()))
700 return false;
701
702 Val = Val->stripInBoundsOffsets();
703 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
704 NonNullPointerSet NonNullPointers;
705 for (Instruction &I : *BB)
706 AddNonNullPointersByInstruction(&I, NonNullPointers);
707 return NonNullPointers;
708 });
709}
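// Illustrative IR example (not part of the original source): because %p is
// dereferenced by the load in %bb (address space 0), a query at the block's
// terminator can record that %p is not null:
//
//   bb:
//     %v = load i32, ptr %p
//     ...
//     br i1 %c, label %then, label %else   ; here: %p != null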
710
711std::optional<ValueLatticeElement>
712LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
713 ValueLatticeElement Result; // Start Undefined.
714
715 // If this is the entry block, we must be asking about an argument.
716 if (BB->isEntryBlock()) {
717 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
718 if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
719 return ValueLatticeElement::getRange(*Range);
720 return ValueLatticeElement::getOverdefined();
721 }
722
723 // Loop over all of our predecessors, merging what we know from them into
724 // result. If we encounter an unexplored predecessor, we eagerly explore it
725 // in a depth first manner. In practice, this has the effect of discovering
726 // paths we can't analyze eagerly without spending compile times analyzing
727 // other paths. This heuristic benefits from the fact that predecessors are
728 // frequently arranged such that dominating ones come first and we quickly
729 // find a path to function entry. TODO: We should consider explicitly
730 // canonicalizing to make this true rather than relying on this happy
731 // accident.
732 for (BasicBlock *Pred : predecessors(BB)) {
733 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
734 if (!EdgeResult)
735 // Explore that input, then return here
736 return std::nullopt;
737
738 Result.mergeIn(*EdgeResult);
739
740 // If we hit overdefined, exit early. The BlockVals entry is already set
741 // to overdefined.
742 if (Result.isOverdefined()) {
743 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
744 << "' - overdefined because of pred '"
745 << Pred->getName() << "' (non local).\n");
746 return Result;
747 }
748 }
749
750 // Return the merged value, which is more precise than 'overdefined'.
751 assert(!Result.isOverdefined());
752 return Result;
753}
754
755std::optional<ValueLatticeElement>
756LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
757 ValueLatticeElement Result; // Start Undefined.
758
759 // Loop over all of our predecessors, merging what we know from them into
760 // result. See the comment about the chosen traversal order in
761 // solveBlockValueNonLocal; the same reasoning applies here.
762 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
763 BasicBlock *PhiBB = PN->getIncomingBlock(i);
764 Value *PhiVal = PN->getIncomingValue(i);
765 // Note that we can provide PN as the context value to getEdgeValue, even
766 // though the results will be cached, because PN is the value being used as
767 // the cache key in the caller.
768 std::optional<ValueLatticeElement> EdgeResult =
769 getEdgeValue(PhiVal, PhiBB, BB, PN);
770 if (!EdgeResult)
771 // Explore that input, then return here
772 return std::nullopt;
773
774 Result.mergeIn(*EdgeResult);
775
776 // If we hit overdefined, exit early. The BlockVals entry is already set
777 // to overdefined.
778 if (Result.isOverdefined()) {
779 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
780 << "' - overdefined because of pred (local).\n");
781
782 return Result;
783 }
784 }
785
786 // Return the merged value, which is more precise than 'overdefined'.
787 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
788 return Result;
789}
790
791// If we can determine a constraint on the value given conditions assumed by
792// the program, intersect those constraints with BBLV
793void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
794 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
795 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
796 if (!BBI)
797 return;
798
799 BasicBlock *BB = BBI->getParent();
800 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
801 if (!AssumeVH)
802 continue;
803
804 // Only check assumes in the block of the context instruction. Other
805 // assumes will have already been taken into account when the value was
806 // propagated from predecessor blocks.
807 auto *I = cast<CallInst>(AssumeVH);
808 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
809 continue;
810
811 BBLV = intersect(BBLV, *getValueFromCondition(Val, I->getArgOperand(0),
812 /*IsTrueDest*/ true,
813 /*UseBlockValue*/ false));
814 }
815
816 // If guards are not used in the module, don't spend time looking for them
817 if (GuardDecl && !GuardDecl->use_empty() &&
818 BBI->getIterator() != BB->begin()) {
819 for (Instruction &I :
820 make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
821 Value *Cond = nullptr;
822 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
823 BBLV = intersect(BBLV,
824 *getValueFromCondition(Val, Cond, /*IsTrueDest*/ true,
825 /*UseBlockValue*/ false));
826 }
827 }
828
829 if (BBLV.isOverdefined()) {
830 // Check whether we're checking at the terminator, and the pointer has
831 // been dereferenced in this block.
832 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
833 if (PTy && BB->getTerminator() == BBI &&
834 isNonNullAtEndOfBlock(Val, BB))
835 BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
836 }
837}
838
839std::optional<ValueLatticeElement>
840LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
841 // Recurse on our inputs if needed
842 std::optional<ValueLatticeElement> OptTrueVal =
843 getBlockValue(SI->getTrueValue(), BB, SI);
844 if (!OptTrueVal)
845 return std::nullopt;
846 ValueLatticeElement &TrueVal = *OptTrueVal;
847
848 std::optional<ValueLatticeElement> OptFalseVal =
849 getBlockValue(SI->getFalseValue(), BB, SI);
850 if (!OptFalseVal)
851 return std::nullopt;
852 ValueLatticeElement &FalseVal = *OptFalseVal;
853
854 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
855 const ConstantRange &TrueCR = TrueVal.asConstantRange(SI->getType());
856 const ConstantRange &FalseCR = FalseVal.asConstantRange(SI->getType());
857 Value *LHS = nullptr;
858 Value *RHS = nullptr;
859 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
860 // Is this a min specifically of our two inputs? (Avoid the risk of
861 // ValueTracking getting smarter looking back past our immediate inputs.)
862 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
863 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
864 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
865 ConstantRange ResultCR = [&]() {
866 switch (SPR.Flavor) {
867 default:
868 llvm_unreachable("unexpected minmax type!");
869 case SPF_SMIN: /// Signed minimum
870 return TrueCR.smin(FalseCR);
871 case SPF_UMIN: /// Unsigned minimum
872 return TrueCR.umin(FalseCR);
873 case SPF_SMAX: /// Signed maximum
874 return TrueCR.smax(FalseCR);
875 case SPF_UMAX: /// Unsigned maximum
876 return TrueCR.umax(FalseCR);
877 };
878 }();
879 return ValueLatticeElement::getRange(
880 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
881 FalseVal.isConstantRangeIncludingUndef());
882 }
883
884 if (SPR.Flavor == SPF_ABS) {
885 if (LHS == SI->getTrueValue())
886 return ValueLatticeElement::getRange(
887 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
888 if (LHS == SI->getFalseValue())
889 return ValueLatticeElement::getRange(
890 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
891 }
892
893 if (SPR.Flavor == SPF_NABS) {
894 ConstantRange Zero(APInt(TrueCR.getBitWidth(), 0));
895 if (LHS == SI->getTrueValue())
896 return ValueLatticeElement::getRange(
897 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
898 if (LHS == SI->getFalseValue())
899 return ValueLatticeElement::getRange(
900 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
901 }
902 }
903
904 // Can we constrain the facts about the true and false values by using the
905 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
906 // TODO: We could potentially refine an overdefined true value above.
907 Value *Cond = SI->getCondition();
908 // If the value is undef, a different value may be chosen in
909 // the select condition.
910 if (isGuaranteedNotToBeUndef(Cond, AC)) {
911 TrueVal =
912 intersect(TrueVal, *getValueFromCondition(SI->getTrueValue(), Cond,
913 /*IsTrueDest*/ true,
914 /*UseBlockValue*/ false));
915 FalseVal =
916 intersect(FalseVal, *getValueFromCondition(SI->getFalseValue(), Cond,
917 /*IsTrueDest*/ false,
918 /*UseBlockValue*/ false));
919 }
920
921 ValueLatticeElement Result = TrueVal;
922 Result.mergeIn(FalseVal);
923 return Result;
924}
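// Illustrative IR example (not part of the original source): for the clamp
// idiom select(a > 5, a, 5) the condition constrains the true arm, so the
// select is known to be at least 5 even when nothing is known about %a:
//
//   %cmp = icmp sgt i32 %a, 5
//   %sel = select i1 %cmp, i32 %a, i32 5   ; %sel is in [5, INT_MAX]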
925
926std::optional<ConstantRange>
927LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
928 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
929 if (!OptVal)
930 return std::nullopt;
931 return OptVal->asConstantRange(V->getType());
932}
933
934std::optional<ValueLatticeElement>
935LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
936 // Filter out casts we don't know how to reason about before attempting to
937 // recurse on our operand. This can cut a long search short if we know we're
938 // not going to be able to get any useful information anyway.
939 switch (CI->getOpcode()) {
940 case Instruction::Trunc:
941 case Instruction::SExt:
942 case Instruction::ZExt:
943 break;
944 default:
945 // Unhandled instructions are overdefined.
946 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
947 << "' - overdefined (unknown cast).\n");
948 return ValueLatticeElement::getOverdefined();
949 }
950
951 // Figure out the range of the LHS. If that fails, we still apply the
952 // transfer rule on the full set since we may be able to locally infer
953 // interesting facts.
954 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
955 if (!LHSRes)
956 // More work to do before applying this transfer rule.
957 return std::nullopt;
958 const ConstantRange &LHSRange = *LHSRes;
959
960 const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
961
962 // NOTE: We're currently limited by the set of operations that ConstantRange
963 // can evaluate symbolically. Enhancing that set will allow us to analyze
964 // more definitions.
965 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
966 ResultBitWidth));
967}
968
969std::optional<ValueLatticeElement>
970LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
971 Instruction *I, BasicBlock *BB,
972 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
973 OpFn) {
974 // Figure out the ranges of the operands. If that fails, use a
975 // conservative range, but apply the transfer rule anyways. This
976 // lets us pick up facts from expressions like "and i32 (call i32
977 // @foo()), 32"
978 std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
979 if (!LHSRes)
980 return std::nullopt;
981
982 std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
983 if (!RHSRes)
984 return std::nullopt;
985
986 const ConstantRange &LHSRange = *LHSRes;
987 const ConstantRange &RHSRange = *RHSRes;
988 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
989}
990
991std::optional<ValueLatticeElement>
992LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
993 assert(BO->getOperand(0)->getType()->isSized() &&
994 "all operands to binary operators are sized");
995 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
996 unsigned NoWrapKind = OBO->getNoWrapKind();
997 return solveBlockValueBinaryOpImpl(
998 BO, BB,
999 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1000 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1001 });
1002 }
1003
1004 return solveBlockValueBinaryOpImpl(
1005 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1006 return CR1.binaryOp(BO->getOpcode(), CR2);
1007 });
1008}
1009
1010std::optional<ValueLatticeElement>
1011LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1012 BasicBlock *BB) {
1013 return solveBlockValueBinaryOpImpl(
1014 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1015 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1016 });
1017}
1018
1019std::optional<ValueLatticeElement>
1020LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1021 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1022 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1023 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1024 << "' - unknown intrinsic.\n");
1025 return MetadataVal;
1026 }
1027
1028 SmallVector<ConstantRange, 2> OpRanges;
1029 for (Value *Op : II->args()) {
1030 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1031 if (!Range)
1032 return std::nullopt;
1033 OpRanges.push_back(*Range);
1034 }
1035
1036 return intersect(ValueLatticeElement::getRange(ConstantRange::intrinsic(
1037 II->getIntrinsicID(), OpRanges)),
1038 MetadataVal);
1039}
1040
1041std::optional<ValueLatticeElement>
1042LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1043 BasicBlock *BB) {
1044 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1045 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1046 return solveBlockValueOverflowIntrinsic(WO, BB);
1047
1048 // Handle extractvalue of insertvalue to allow further simplification
1049 // based on replaced with.overflow intrinsics.
1050 if (Value *V = simplifyExtractValueInst(
1051 EVI->getAggregateOperand(), EVI->getIndices(),
1052 EVI->getDataLayout()))
1053 return getBlockValue(V, BB, EVI);
1054
1055 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1056 << "' - overdefined (unknown extractvalue).\n");
1057 return ValueLatticeElement::getOverdefined();
1058}
1059
1060static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1061 ICmpInst::Predicate Pred) {
1062 if (LHS == Val)
1063 return true;
1064
1065 // Handle range checking idiom produced by InstCombine. We will subtract the
1066 // offset from the allowed range for RHS in this case.
1067 const APInt *C;
1068 if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
1069 Offset = *C;
1070 return true;
1071 }
1072
1073 // Handle the symmetric case. This appears in saturation patterns like
1074 // (x == 16) ? 16 : (x + 1).
1075 if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
1076 Offset = -*C;
1077 return true;
1078 }
1079
1080 // If (x | y) < C, then (x < C) && (y < C).
1081 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1082 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1083 return true;
1084
1085 // If (x & y) > C, then (x > C) && (y > C).
1086 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1087 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1088 return true;
1089
1090 return false;
1091}
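// Illustrative example (not part of the original source): InstCombine turns
// the range check "%x u>= 2 && %x u< 10" into an add plus a single unsigned
// compare; matchICmpOperand() recovers Val and the offset so the RHS range
// can be shifted back:
//
//   %add = add i32 %x, -2
//   %cmp = icmp ult i32 %add, 8   ; true edge: %x + (-2) is in [0, 8),
//                                 ; so %x is in [2, 10)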
1092
1093/// Get value range for a "(Val + Offset) Pred RHS" condition.
1094std::optional<ValueLatticeElement>
1095LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1096 Value *RHS,
1097 const APInt &Offset,
1098 Instruction *CxtI,
1099 bool UseBlockValue) {
1100 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1101 /*isFullSet=*/true);
1102 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1103 RHSRange = ConstantRange(CI->getValue());
1104 } else if (UseBlockValue) {
1105 std::optional<ValueLatticeElement> R =
1106 getBlockValue(RHS, CxtI->getParent(), CxtI);
1107 if (!R)
1108 return std::nullopt;
1109 RHSRange = R->asConstantRange(RHS->getType());
1110 }
1111
1112 ConstantRange TrueValues =
1113 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1114 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1115}
1116
1117static std::optional<ConstantRange>
1118getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1119 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1120 bool Invert = false;
1121 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1122 Pred = ICmpInst::getInversePredicate(Pred);
1123 Invert = true;
1124 }
1125 if (Pred == ICmpInst::ICMP_SLE) {
1126 Pred = ICmpInst::ICMP_SLT;
1127 if (RHS.isMaxSignedValue())
1128 return std::nullopt; // Could also return full/empty here, if we wanted.
1129 ++RHS;
1130 }
1131 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1132 if (auto CR = Fn(RHS))
1133 return Invert ? CR->inverse() : CR;
1134 return std::nullopt;
1135}
1136
1137std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1138 Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1139 Value *LHS = ICI->getOperand(0);
1140 Value *RHS = ICI->getOperand(1);
1141
1142 // Get the predicate that must hold along the considered edge.
1143 CmpInst::Predicate EdgePred =
1144 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1145
1146 if (isa<Constant>(RHS)) {
1147 if (ICI->isEquality() && LHS == Val) {
1148 if (EdgePred == ICmpInst::ICMP_EQ)
1149 return ValueLatticeElement::get(cast<Constant>(RHS));
1150 else if (!isa<UndefValue>(RHS))
1151 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1152 }
1153 }
1154
1155 Type *Ty = Val->getType();
1156 if (!Ty->isIntegerTy())
1157 return ValueLatticeElement::getOverdefined();
1158
1159 unsigned BitWidth = Ty->getScalarSizeInBits();
1160 APInt Offset(BitWidth, 0);
1161 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1162 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1163 UseBlockValue);
1164
1165 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1166 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1167 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1168 UseBlockValue);
1169
1170 const APInt *Mask, *C;
1171 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1172 match(RHS, m_APInt(C))) {
1173 // If (Val & Mask) == C then all the masked bits are known and we can
1174 // compute a value range based on that.
1175 if (EdgePred == ICmpInst::ICMP_EQ) {
1176 KnownBits Known;
1177 Known.Zero = ~*C & *Mask;
1178 Known.One = *C & *Mask;
1179 return ValueLatticeElement::getRange(
1180 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1181 }
1182
1183 if (EdgePred == ICmpInst::ICMP_NE)
1184 return ValueLatticeElement::getRange(
1185 ConstantRange::makeMaskNotEqualRange(*Mask, *C));
1186 }
1187
1188 // If (X urem Modulus) >= C, then X >= C.
1189 // If trunc X >= C, then X >= C.
1190 // TODO: An upper bound could be computed as well.
1191 if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1192 m_Trunc(m_Specific(Val)))) &&
1193 match(RHS, m_APInt(C))) {
1194 // Use the icmp region so we don't have to deal with different predicates.
1195 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1196 if (!CR.isEmptySet())
1197 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1198 CR.getUnsignedMin(), APInt(BitWidth, 0)));
1199 }
1200
1201 // Recognize:
1202 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1203 // Preconditions: (C << ShAmtC) >> ShAmtC == C
1204 const APInt *ShAmtC;
1205 if (CmpInst::isSigned(EdgePred) &&
1206 match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1207 match(RHS, m_APInt(C))) {
1208 auto CR = getRangeViaSLT(
1209 EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1210 APInt New = RHS << *ShAmtC;
1211 if ((New.ashr(*ShAmtC)) != RHS)
1212 return std::nullopt;
1213 return ConstantRange::getNonEmpty(
1214 APInt::getSignedMinValue(New.getBitWidth()), New);
1215 });
1216 if (CR)
1217 return ValueLatticeElement::getRange(*CR);
1218 }
1219
1220 return ValueLatticeElement::getOverdefined();
1221}
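// Illustrative example (not part of the original source): for a masked
// equality such as
//
//   %m = and i8 %x, -16          ; Mask = 0xF0
//   %c = icmp eq i8 %m, 32       ; C    = 0x20
//
// the true edge fixes the high nibble of %x to 0x2 while the low nibble
// stays unknown, so the known-bits based range for %x is [0x20, 0x30).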
1222
1223// Handle conditions of the form
1224// extractvalue(op.with.overflow(%x, C), 1).
1225static ValueLatticeElement getValueFromOverflowCondition(
1226 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1227 // TODO: This only works with a constant RHS for now. We could also compute
1228 // the range of the RHS, but this doesn't fit into the current structure of
1229 // the edge value calculation.
1230 const APInt *C;
1231 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1232 return ValueLatticeElement::getOverdefined();
1233
1234 // Calculate the possible values of %x for which no overflow occurs.
1235 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1236 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1237
1238 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1239 // constrained to its inverse (all values that might cause overflow).
1240 if (IsTrueDest)
1241 NWR = NWR.inverse();
1242 return ValueLatticeElement::getRange(NWR);
1243}
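// Illustrative example (not part of the original source): for
//
//   %s  = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 100)
//   %ov = extractvalue { i8, i1 } %s, 1
//
// the no-wrap region of "+ 100" is %x in [0, 156). On the edge where %ov is
// false, %x is constrained to that region; on the edge where %ov is true it
// is constrained to the inverse, [156, 256).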
1244
1245std::optional<ValueLatticeElement>
1246LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1247 bool IsTrueDest, bool UseBlockValue,
1248 unsigned Depth) {
1249 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1250 return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1251
1252 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1253 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1254 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1255 return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1256
1257 if (++Depth == MaxAnalysisRecursionDepth)
1258 return ValueLatticeElement::getOverdefined();
1259
1260 Value *N;
1261 if (match(Cond, m_Not(m_Value(N))))
1262 return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1263
1264 Value *L, *R;
1265 bool IsAnd;
1266 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1267 IsAnd = true;
1268 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1269 IsAnd = false;
1270 else
1271 return ValueLatticeElement::getOverdefined();
1272
1273 std::optional<ValueLatticeElement> LV =
1274 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1275 if (!LV)
1276 return std::nullopt;
1277 std::optional<ValueLatticeElement> RV =
1278 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1279 if (!RV)
1280 return std::nullopt;
1281
1282 // if (L && R) -> intersect L and R
1283 // if (!(L || R)) -> intersect !L and !R
1284 // if (L || R) -> union L and R
1285 // if (!(L && R)) -> union !L and !R
1286 if (IsTrueDest ^ IsAnd) {
1287 LV->mergeIn(*RV);
1288 return *LV;
1289 }
1290
1291 return intersect(*LV, *RV);
1292}
1293
1294// Return true if Usr has Op as an operand, otherwise false.
1295static bool usesOperand(User *Usr, Value *Op) {
1296 return is_contained(Usr->operands(), Op);
1297}
1298
1299// Return true if the instruction type of Val is supported by
1300// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1301// Call this before calling constantFoldUser() to find out if it's even worth
1302// attempting to call it.
1303static bool isOperationFoldable(User *Usr) {
1304 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1305}
1306
1307// Check if Usr can be simplified to an integer constant when the value of one
1308// of its operands Op is an integer constant OpConstVal. If so, return it as an
1309// lattice value range with a single element or otherwise return an overdefined
1310// lattice value.
1312 const APInt &OpConstVal,
1313 const DataLayout &DL) {
1314 assert(isOperationFoldable(Usr) && "Precondition");
1315 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1316 // Check if Usr can be simplified to a constant.
1317 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1318 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1319 if (auto *C = dyn_cast_or_null<ConstantInt>(
1320 simplifyCastInst(CI->getOpcode(), OpConst,
1321 CI->getDestTy(), DL))) {
1322 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1323 }
1324 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1325 bool Op0Match = BO->getOperand(0) == Op;
1326 bool Op1Match = BO->getOperand(1) == Op;
1327 assert((Op0Match || Op1Match) &&
1328 "Operand 0 nor Operand 1 isn't a match");
1329 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1330 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1331 if (auto *C = dyn_cast_or_null<ConstantInt>(
1332 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1333 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1334 }
1335 } else if (isa<FreezeInst>(Usr)) {
1336 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1337 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1338 }
1339 return ValueLatticeElement::getOverdefined();
1340}
1341
1342/// Compute the value of Val on the edge BBFrom -> BBTo.
1343std::optional<ValueLatticeElement>
1344LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1345 BasicBlock *BBTo, bool UseBlockValue) {
1346 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1347 // know that v != 0.
1348 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1349 // If this is a conditional branch and only one successor goes to BBTo, then
1350 // we may be able to infer something from the condition.
1351 if (BI->isConditional() &&
1352 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1353 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1354 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1355 "BBTo isn't a successor of BBFrom");
1356 Value *Condition = BI->getCondition();
1357
1358 // If V is the condition of the branch itself, then we know exactly what
1359 // it is.
1360 // NB: The condition on a `br` can't be a vector type.
1361 if (Condition == Val)
1362 return ValueLatticeElement::get(ConstantInt::get(
1363 Type::getInt1Ty(Val->getContext()), isTrueDest));
1364
1365 // If the condition of the branch is an equality comparison, we may be
1366 // able to infer the value.
1367 std::optional<ValueLatticeElement> Result =
1368 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1369 if (!Result)
1370 return std::nullopt;
1371
1372 if (!Result->isOverdefined())
1373 return Result;
1374
1375 if (User *Usr = dyn_cast<User>(Val)) {
1376 assert(Result->isOverdefined() && "Result isn't overdefined");
1377 // Check with isOperationFoldable() first to avoid linearly iterating
1378 // over the operands unnecessarily which can be expensive for
1379 // instructions with many operands.
1380 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1381 const DataLayout &DL = BBTo->getDataLayout();
1382 if (usesOperand(Usr, Condition)) {
1383 // If Val has Condition as an operand and Val can be folded into a
1384 // constant with either Condition == true or Condition == false,
1385 // propagate the constant.
1386 // eg.
1387 // ; %Val is true on the edge to %then.
1388 // %Val = and i1 %Condition, true.
1389 // br %Condition, label %then, label %else
1390 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1391 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1392 } else {
1393 // If one of Val's operand has an inferred value, we may be able to
1394 // infer the value of Val.
1395 // eg.
1396 // ; %Val is 94 on the edge to %then.
1397 // %Val = add i8 %Op, 1
1398 // %Condition = icmp eq i8 %Op, 93
1399 // br i1 %Condition, label %then, label %else
1400 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1401 Value *Op = Usr->getOperand(i);
1402 ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1403 Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1404 if (std::optional<APInt> OpConst =
1405 OpLatticeVal.asConstantInteger()) {
1406 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1407 break;
1408 }
1409 }
1410 }
1411 }
1412 }
1413 if (!Result->isOverdefined())
1414 return Result;
1415 }
1416 }
1417
1418 // If the edge was formed by a switch on the value, then we may know exactly
1419 // what it is.
1420 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1421 Value *Condition = SI->getCondition();
1422 if (!isa<IntegerType>(Val->getType()))
1423 return ValueLatticeElement::getOverdefined();
1424 bool ValUsesConditionAndMayBeFoldable = false;
1425 if (Condition != Val) {
1426 // Check if Val has Condition as an operand.
1427 if (User *Usr = dyn_cast<User>(Val))
1428 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1429 usesOperand(Usr, Condition);
1430 if (!ValUsesConditionAndMayBeFoldable)
1431 return ValueLatticeElement::getOverdefined();
1432 }
1433 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1434 "Condition != Val nor Val doesn't use Condition");
1435
1436 bool DefaultCase = SI->getDefaultDest() == BBTo;
1437 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1438 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1439
1440 for (auto Case : SI->cases()) {
1441 APInt CaseValue = Case.getCaseValue()->getValue();
1442 ConstantRange EdgeVal(CaseValue);
1443 if (ValUsesConditionAndMayBeFoldable) {
1444 User *Usr = cast<User>(Val);
1445 const DataLayout &DL = BBTo->getDataLayout();
1446 ValueLatticeElement EdgeLatticeVal =
1447 constantFoldUser(Usr, Condition, CaseValue, DL);
1448 if (EdgeLatticeVal.isOverdefined())
1449 return ValueLatticeElement::getOverdefined();
1450 EdgeVal = EdgeLatticeVal.getConstantRange();
1451 }
1452 if (DefaultCase) {
1453 // It is possible that the default destination is the destination of
1454 // some cases. We cannot perform difference for those cases.
1455 // We know Condition != CaseValue in BBTo. In some cases we can use
1456 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1457 // only do this when f is identity (i.e. Val == Condition), but we
1458 // should be able to do this for any injective f.
1459 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1460 EdgesVals = EdgesVals.difference(EdgeVal);
1461 } else if (Case.getCaseSuccessor() == BBTo)
1462 EdgesVals = EdgesVals.unionWith(EdgeVal);
1463 }
1464 return ValueLatticeElement::getRange(std::move(EdgesVals));
1465 }
1466 return ValueLatticeElement::getOverdefined();
1467}
1468
1469/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1470/// the basic block if the edge does not constrain Val.
1471std::optional<ValueLatticeElement>
1472LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1473 BasicBlock *BBTo, Instruction *CxtI) {
1474 // If already a constant, there is nothing to compute.
1475 if (Constant *VC = dyn_cast<Constant>(Val))
1476 return ValueLatticeElement::get(VC);
1477
1478 std::optional<ValueLatticeElement> LocalResult =
1479 getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1480 if (!LocalResult)
1481 return std::nullopt;
1482
1483 if (hasSingleValue(*LocalResult))
1484 // Can't get any more precise here
1485 return LocalResult;
1486
1487 std::optional<ValueLatticeElement> OptInBlock =
1488 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1489 if (!OptInBlock)
1490 return std::nullopt;
1491 ValueLatticeElement &InBlock = *OptInBlock;
1492
1493 // We can use the context instruction (generically the ultimate instruction
1494 // the calling pass is trying to simplify) here, even though the result of
1495 // this function is generally cached when called from the solve* functions
1496 // (and that cached result might be used with queries using a different
1497 // context instruction), because when this function is called from the solve*
1498 // functions, the context instruction is not provided. When called from
1499 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1500 // but then the result is not cached.
1501 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1502
1503 return intersect(*LocalResult, InBlock);
1504}
1505
1506ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1507 Instruction *CxtI) {
1508 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1509 << BB->getName() << "'\n");
1510
1511 assert(BlockValueStack.empty() && BlockValueSet.empty());
1512 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1513 if (!OptResult) {
1514 solve();
1515 OptResult = getBlockValue(V, BB, CxtI);
1516 assert(OptResult && "Value not available after solving");
1517 }
1518
1519 ValueLatticeElement Result = *OptResult;
1520 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1521 return Result;
1522}
1523
1524ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1525 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1526 << "'\n");
1527
1528 if (auto *C = dyn_cast<Constant>(V))
1529 return ValueLatticeElement::get(C);
1530
1531 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1532 if (auto *I = dyn_cast<Instruction>(V))
1533 Result = getFromRangeMetadata(I);
1534 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1535
1536 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1537 return Result;
1538}
1539
1540ValueLatticeElement LazyValueInfoImpl::
1541getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1542 Instruction *CxtI) {
1543 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1544 << FromBB->getName() << "' to '" << ToBB->getName()
1545 << "'\n");
1546
1547 std::optional<ValueLatticeElement> Result =
1548 getEdgeValue(V, FromBB, ToBB, CxtI);
1549 while (!Result) {
1550 // As the worklist only explicitly tracks block values (but not edge values)
1551 // we may have to call solve() multiple times, as the edge value calculation
1552 // may request additional block values.
1553 solve();
1554 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1555 }
1556
1557 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1558 return *Result;
1559}
1560
1561ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
1562 Value *V = U.get();
1563 auto *CxtI = cast<Instruction>(U.getUser());
1564 ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);
1565
1566 // Check whether the only (possibly transitive) use of the value is in a
1567 // position where V can be constrained by a select or branch condition.
1568 const Use *CurrU = &U;
1569 // TODO: Increase limit?
1570 const unsigned MaxUsesToInspect = 3;
1571 for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1572 std::optional<ValueLatticeElement> CondVal;
1573 auto *CurrI = cast<Instruction>(CurrU->getUser());
1574 if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1575 // If the value is undef, a different value may be chosen in
1576 // the select condition and at use.
1577 if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
1578 break;
1579 if (CurrU->getOperandNo() == 1)
1580 CondVal =
1581 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
1582 /*UseBlockValue*/ false);
1583 else if (CurrU->getOperandNo() == 2)
1584 CondVal =
1585 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
1586 /*UseBlockValue*/ false);
1587 } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1588 // TODO: Use non-local query?
1589 CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
1590 PHI->getParent(), /*UseBlockValue*/ false);
1591 }
1592 if (CondVal)
1593 VL = intersect(VL, *CondVal);
1594
1595 // Only follow one-use chain, to allow direct intersection of conditions.
1596 // If there are multiple uses, we would have to intersect with the union of
1597 // all conditions at different uses.
1598 // Stop walking if we hit a non-speculatable instruction. Even if the
1599 // result is only used under a specific condition, executing the
1600 // instruction itself may cause side effects or UB already.
1601 // This also disallows looking through phi nodes: If the phi node is part
1602 // of a cycle, we might end up reasoning about values from different cycle
1603 // iterations (PR60629).
1604 if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
1605 break;
1606 CurrU = &*CurrI->use_begin();
1607 }
1608 return VL;
1609}
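// The use-site walk above boils down to intersecting the block value with the
// range implied by a dominating select or branch condition. A minimal
// standalone sketch of that idea, using only ConstantRange; the function name
// and the concrete ranges below are illustrative, not part of this file.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

static llvm::ConstantRange exampleRefineAtUse() {
  using namespace llvm;
  // Suppose %v has block value [0, 100) and is the true arm of
  // "select (icmp ult %v, 10), %v, %w"; on that arm, %v < 10 must hold.
  ConstantRange BlockVal(APInt(32, 0), APInt(32, 100));
  ConstantRange FromCond =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 10));
  return BlockVal.intersectWith(FromCond); // yields [0, 10)
}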
1610
1611void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1612 BasicBlock *NewSucc) {
1613 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1614}
1615
1616//===----------------------------------------------------------------------===//
1617// LazyValueInfo Impl
1618//===----------------------------------------------------------------------===//
1619
1620bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1621 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1622
1623 if (auto *Impl = Info.getImpl())
1624 Impl->clear();
1625
1626 // Fully lazy.
1627 return false;
1628}
1629
1630void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1631 AU.setPreservesAll();
1632 AU.addRequired<AssumptionCacheTracker>();
1633 AU.addRequired<TargetLibraryInfoWrapperPass>();
1634}
1635
1636LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1637
1638/// This lazily constructs the LazyValueInfoImpl.
1639LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1640 if (!PImpl) {
1641 assert(M && "getCache() called with a null Module");
1642 const DataLayout &DL = M->getDataLayout();
1643 Function *GuardDecl =
1644 M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
1645 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1646 }
1647 return *static_cast<LazyValueInfoImpl *>(PImpl);
1648}
1649
1650LazyValueInfoImpl *LazyValueInfo::getImpl() {
1651 if (!PImpl)
1652 return nullptr;
1653 return static_cast<LazyValueInfoImpl *>(PImpl);
1654}
1655
1656LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1657
1658void LazyValueInfo::releaseMemory() {
1659 // If the cache was allocated, free it.
1660 if (auto *Impl = getImpl()) {
1661 delete &*Impl;
1662 PImpl = nullptr;
1663 }
1664}
1665
1666bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1667 FunctionAnalysisManager::Invalidator &Inv) {
1668 // We need to invalidate if we have either failed to preserve this analysis's
1669 // result directly or if any of its dependencies have been invalidated.
1670 auto PAC = PA.getChecker<LazyValueAnalysis>();
1671 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1672 return true;
1673
1674 return false;
1675}
1676
1677void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1678
1679LazyValueInfo LazyValueAnalysis::run(Function &F,
1680 FunctionAnalysisManager &FAM) {
1681 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1682
1683 return LazyValueInfo(&AC, &F.getDataLayout());
1684}
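// A minimal sketch of how a new-pass-manager pass would request this analysis;
// the pass name is hypothetical and only the FAM.getResult call reflects the
// interface defined in this file.
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"

namespace {
struct ExampleLVIConsumerPass : llvm::PassInfoMixin<ExampleLVIConsumerPass> {
  llvm::PreservedAnalyses run(llvm::Function &F,
                              llvm::FunctionAnalysisManager &FAM) {
    llvm::LazyValueInfo &LVI = FAM.getResult<llvm::LazyValueAnalysis>(F);
    (void)LVI; // queries such as getConstantRange(V, CxtI, ...) would go here
    return llvm::PreservedAnalyses::all();
  }
};
} // namespace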
1685
1686/// Returns true if we can statically tell that this value will never be a
1687/// "useful" constant. In practice, this means we've got something like an
1688/// alloca or a malloc call for which a comparison against a constant can
1689/// only be guarding dead code. Note that we are potentially giving up some
1690 /// precision in dead code (a constant result) in favour of avoiding an
1691 /// expensive search for an easily answered common query.
1692static bool isKnownNonConstant(Value *V) {
1693 V = V->stripPointerCasts();
1694 // The return val of alloc cannot be a Constant.
1695 if (isa<AllocaInst>(V))
1696 return true;
1697 return false;
1698}
1699
1700Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1701 // Bail out early if V is known not to be a Constant.
1702 if (isKnownNonConstant(V))
1703 return nullptr;
1704
1705 BasicBlock *BB = CxtI->getParent();
1706 ValueLatticeElement Result =
1707 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1708
1709 if (Result.isConstant())
1710 return Result.getConstant();
1711 if (Result.isConstantRange()) {
1712 const ConstantRange &CR = Result.getConstantRange();
1713 if (const APInt *SingleVal = CR.getSingleElement())
1714 return ConstantInt::get(V->getType(), *SingleVal);
1715 }
1716 return nullptr;
1717}
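// An illustrative caller of getConstant, in the style of a correlated-value
// propagation transform; the helper name is hypothetical and the rewrite shown
// is only a sketch.
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Instructions.h"

// If LVI proves the returned value is a single constant at the return point,
// rewrite the return to use that constant directly.
static bool exampleSimplifyReturnedValue(llvm::ReturnInst &RI,
                                         llvm::LazyValueInfo &LVI) {
  llvm::Value *Op = RI.getReturnValue();
  if (!Op || llvm::isa<llvm::Constant>(Op))
    return false;
  if (llvm::Constant *C = LVI.getConstant(Op, &RI)) {
    RI.setOperand(0, C);
    return true;
  }
  return false;
}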
1718
1719ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1720 bool UndefAllowed) {
1721 BasicBlock *BB = CxtI->getParent();
1722 ValueLatticeElement Result =
1723 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1724 return Result.asConstantRange(V->getType(), UndefAllowed);
1725}
1726
1727ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1728 bool UndefAllowed) {
1729 auto *Inst = cast<Instruction>(U.getUser());
1730 ValueLatticeElement Result =
1731 getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1732 return Result.asConstantRange(U->getType(), UndefAllowed);
1733}
1734
1735/// Determine whether the specified value is known to be a
1736/// constant on the specified edge. Return null if not.
1737Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1738 BasicBlock *ToBB,
1739 Instruction *CxtI) {
1740 Module *M = FromBB->getModule();
1741 ValueLatticeElement Result =
1742 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1743
1744 if (Result.isConstant())
1745 return Result.getConstant();
1746 if (Result.isConstantRange()) {
1747 const ConstantRange &CR = Result.getConstantRange();
1748 if (const APInt *SingleVal = CR.getSingleElement())
1749 return ConstantInt::get(V->getType(), *SingleVal);
1750 }
1751 return nullptr;
1752}
1753
1754ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1755 BasicBlock *FromBB,
1756 BasicBlock *ToBB,
1757 Instruction *CxtI) {
1758 Module *M = FromBB->getModule();
1759 ValueLatticeElement Result =
1760 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1761 // TODO: Should undef be allowed here?
1762 return Result.asConstantRange(V->getType(), /*UndefAllowed*/ true);
1763}
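// A small sketch of a jump-threading-style client of the edge query above;
// the helper name is hypothetical and assumes an integer-typed V.
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/ConstantRange.h"

// Returns true if V is provably nonzero whenever control flows FromBB -> ToBB.
static bool exampleNonZeroOnEdge(llvm::LazyValueInfo &LVI, llvm::Value *V,
                                 llvm::BasicBlock *FromBB,
                                 llvm::BasicBlock *ToBB) {
  llvm::ConstantRange CR = LVI.getConstantRangeOnEdge(V, FromBB, ToBB);
  return !CR.contains(llvm::APInt::getZero(CR.getBitWidth()));
}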
1764
1765static Constant *getPredicateResult(CmpInst::Predicate Pred, Constant *C,
1766 const ValueLatticeElement &Val,
1767 const DataLayout &DL) {
1768 // If we know the value is a constant, evaluate the conditional.
1769 if (Val.isConstant())
1770 return ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
1771
1772 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
1773 if (Val.isConstantRange()) {
1774 const ConstantRange &CR = Val.getConstantRange();
1775 ConstantRange RHS = C->toConstantRange();
1776 if (CR.icmp(Pred, RHS))
1777 return ConstantInt::getTrue(ResTy);
1778 if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
1779 return ConstantInt::getFalse(ResTy);
1780 return nullptr;
1781 }
1782
1783 if (Val.isNotConstant()) {
1784 // If this is an equality comparison, we can try to fold it knowing that
1785 // "V != C1".
1786 if (Pred == ICmpInst::ICMP_EQ) {
1787 // !C1 == C -> false iff C1 == C.
1788 Constant *Res = ConstantFoldCompareInstOperands(
1789 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
1790 if (Res && Res->isNullValue())
1791 return ConstantInt::getFalse(ResTy);
1792 } else if (Pred == ICmpInst::ICMP_NE) {
1793 // !C1 != C -> true iff C1 == C.
1794 Constant *Res = ConstantFoldCompareInstOperands(
1795 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
1796 if (Res && Res->isNullValue())
1797 return ConstantInt::getTrue(ResTy);
1798 }
1799 return nullptr;
1800 }
1801
1802 return nullptr;
1803}
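// A worked example of the constant-range case handled above, written as
// standalone code; the ranges and values are illustrative.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

static void examplePredicateFold() {
  using namespace llvm;
  // If the lattice value of %x is the range [1, 5), "icmp ult %x, 8" holds for
  // every element and folds to true, while "icmp eq %x, 8" can never hold, so
  // the inverse-predicate check fires and it folds to false.
  ConstantRange X(APInt(32, 1), APInt(32, 5));
  ConstantRange Eight(APInt(32, 8)); // the singleton range {8}
  bool AlwaysULT = X.icmp(CmpInst::ICMP_ULT, Eight); // true
  bool NeverEQ =
      X.icmp(CmpInst::getInversePredicate(CmpInst::ICMP_EQ), Eight); // true
  (void)AlwaysULT;
  (void)NeverEQ;
}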
1804
1805/// Determine whether the specified value comparison with a constant is known to
1806/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1807Constant *LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
1808 Constant *C, BasicBlock *FromBB,
1809 BasicBlock *ToBB,
1810 Instruction *CxtI) {
1811 Module *M = FromBB->getModule();
1812 ValueLatticeElement Result =
1813 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1814
1815 return getPredicateResult(Pred, C, Result, M->getDataLayout());
1816}
1817
1818Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *V,
1819 Constant *C, Instruction *CxtI,
1820 bool UseBlockValue) {
1821 // Whether a pointer is or is not null is a common query. If
1822 // isKnownNonZero can tell us the result of the predicate, we can
1823 // return it quickly. But this is only a fastpath, and falling
1824 // through would still be correct.
1825 Module *M = CxtI->getModule();
1826 const DataLayout &DL = M->getDataLayout();
1827 if (V->getType()->isPointerTy() && C->isNullValue() &&
1828 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1829 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
1830 if (Pred == ICmpInst::ICMP_EQ)
1831 return ConstantInt::getFalse(ResTy);
1832 else if (Pred == ICmpInst::ICMP_NE)
1833 return ConstantInt::getTrue(ResTy);
1834 }
1835
1836 auto &Impl = getOrCreateImpl(M);
1837 ValueLatticeElement Result =
1838 UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
1839 : Impl.getValueAt(V, CxtI);
1840 Constant *Ret = getPredicateResult(Pred, C, Result, DL);
1841 if (Ret)
1842 return Ret;
1843
1844 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1845 // LVI as a whole tries to compute a lattice value which is conservatively
1846 // correct at a given location. In this case, we have a predicate which we
1847 // weren't able to prove about the merged result, and we're pushing that
1848 // predicate back along each incoming edge to see if we can prove it
1849 // separately for each input. As a motivating example, consider:
1850 // bb1:
1851 // %v1 = ... ; constantrange<1, 5>
1852 // br label %merge
1853 // bb2:
1854 // %v2 = ... ; constantrange<10, 20>
1855 // br label %merge
1856 // merge:
1857 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1858 // %pred = icmp eq i32 %phi, 8
1859 // We can't tell from the lattice value for '%phi' that '%pred' is false
1860 // along each path, but by checking the predicate over each input separately,
1861 // we can.
1862 // We limit the search to one step backwards from the current BB and value.
1863 // We could consider extending this to search further backwards through the
1864 // CFG and/or value graph, but there are non-obvious compile time vs quality
1865 // tradeoffs.
1866 BasicBlock *BB = CxtI->getParent();
1867
1868 // Function entry or an unreachable block. Bail to avoid confusing
1869 // analysis below.
1870 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1871 if (PI == PE)
1872 return nullptr;
1873
1874 // If V is a PHI node in the same block as the context, we need to ask
1875 // questions about the predicate as applied to the incoming value along
1876 // each edge. This is useful for eliminating cases where the predicate is
1877 // known along all incoming edges.
1878 if (auto *PHI = dyn_cast<PHINode>(V))
1879 if (PHI->getParent() == BB) {
1880 Constant *Baseline = nullptr;
1881 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1882 Value *Incoming = PHI->getIncomingValue(i);
1883 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1884 // Note that PredBB may be BB itself.
1885 Constant *Result =
1886 getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
1887
1888 // Keep going as long as we've seen a consistent known result for
1889 // all inputs.
1890 Baseline = (i == 0) ? Result /* First iteration */
1891 : (Baseline == Result ? Baseline
1892 : nullptr); /* All others */
1893 if (!Baseline)
1894 break;
1895 }
1896 if (Baseline)
1897 return Baseline;
1898 }
1899
1900 // For a comparison where the V is outside this block, it's possible
1901 // that we've branched on it before. Look to see if the value is known
1902 // on all incoming edges.
1903 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
1904 // For predecessor edge, determine if the comparison is true or false
1905 // on that edge. If they're all true or all false, we can conclude
1906 // the value of the comparison in this block.
1907 Constant *Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1908 if (Baseline) {
1909 // Check that all remaining incoming values match the first one.
1910 while (++PI != PE) {
1911 Constant *Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1912 if (Ret != Baseline)
1913 break;
1914 }
1915 // If we terminated early, then one of the values didn't match.
1916 if (PI == PE) {
1917 return Baseline;
1918 }
1919 }
1920 }
1921
1922 return nullptr;
1923}
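// The motivating phi example from the comment above, restated as checkable
// range arithmetic; the ranges are illustrative.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

static void examplePerEdgeSplit() {
  using namespace llvm;
  ConstantRange V1(APInt(32, 1), APInt(32, 5));   // %v1 on the edge from bb1
  ConstantRange V2(APInt(32, 10), APInt(32, 20)); // %v2 on the edge from bb2
  ConstantRange Eight(APInt(32, 8));
  // The merged range [1, 20) cannot rule out 8 ...
  bool MergedDecides = V1.unionWith(V2).icmp(CmpInst::ICMP_NE, Eight); // false
  // ... but each incoming range can, so "icmp eq %phi, 8" folds to false once
  // the predicate is pushed onto the individual edges.
  bool Edge1Decides = V1.icmp(CmpInst::ICMP_NE, Eight); // true
  bool Edge2Decides = V2.icmp(CmpInst::ICMP_NE, Eight); // true
  (void)MergedDecides;
  (void)Edge1Decides;
  (void)Edge2Decides;
}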
1924
1925Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *LHS,
1926 Value *RHS, Instruction *CxtI,
1927 bool UseBlockValue) {
1928 if (auto *C = dyn_cast<Constant>(RHS))
1929 return getPredicateAt(Pred, LHS, C, CxtI, UseBlockValue);
1930 if (auto *C = dyn_cast<Constant>(LHS))
1931 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1932 UseBlockValue);
1933
1934 // Got two non-Constant values. Try to determine the comparison results based
1935 // on the block values of the two operands, e.g. because they have
1936 // non-overlapping ranges.
1937 if (UseBlockValue) {
1938 Module *M = CxtI->getModule();
1939 ValueLatticeElement L =
1940 getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
1941 if (L.isOverdefined())
1942 return nullptr;
1943
1944 ValueLatticeElement R =
1945 getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
1946 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
1947 return L.getCompare(Pred, Ty, R, M->getDataLayout());
1948 }
1949 return nullptr;
1950}
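// The "non-overlapping ranges" case mentioned above, as a standalone
// illustration; the ranges are made up for the example.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

static void exampleCompareTwoRanges() {
  using namespace llvm;
  // Neither operand is a constant, but their block values do not overlap:
  // every value in [0, 10) is unsigned-less-than every value in [10, 20),
  // so "icmp ult %lhs, %rhs" folds to true.
  ConstantRange L(APInt(32, 0), APInt(32, 10));
  ConstantRange R(APInt(32, 10), APInt(32, 20));
  bool AlwaysTrue = L.icmp(CmpInst::ICMP_ULT, R); // true
  (void)AlwaysTrue;
}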
1951
1952void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1953 BasicBlock *NewSucc) {
1954 if (auto *Impl = getImpl())
1955 Impl->threadEdge(PredBB, OldSucc, NewSucc);
1956}
1957
1958void LazyValueInfo::forgetValue(Value *V) {
1959 if (auto *Impl = getImpl())
1960 Impl->forgetValue(V);
1961}
1962
1963void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1964 if (auto *Impl = getImpl())
1965 Impl->eraseBlock(BB);
1966}
1967
1968void LazyValueInfo::clear() {
1969 if (auto *Impl = getImpl())
1970 Impl->clear();
1971}
1972
1973void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
1974 if (auto *Impl = getImpl())
1975 Impl->printLVI(F, DTree, OS);
1976}
1977
1978// Print the LVI for the function arguments at the start of each basic block.
1979void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1980 const BasicBlock *BB, formatted_raw_ostream &OS) {
1981 // Find if there are lattice values defined for arguments of the function.
1982 auto *F = BB->getParent();
1983 for (const auto &Arg : F->args()) {
1984 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1985 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
1986 if (Result.isUnknown())
1987 continue;
1988 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
1989 }
1990}
1991
1992// This function prints the LVI analysis for the instruction I at the beginning
1993// of various basic blocks. It relies on calculated values that are stored in
1994 // the LazyValueInfoCache, and in the absence of cached values, recalculates
1995 // the LazyValueInfo for `I` and prints that info.
1996void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
1997 const Instruction *I, formatted_raw_ostream &OS) {
1998
1999 auto *ParentBB = I->getParent();
2000 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2001 // We can generate (solve) LVI values only for blocks that are dominated by
2002 // I's parent. However, to avoid generating LVI for all dominated blocks,
2003 // which would contain redundant/uninteresting information, we print LVI only
2004 // for blocks that may use this LVI information (such as immediate successor
2005 // blocks, and blocks that contain uses of `I`).
2006 auto printResult = [&](const BasicBlock *BB) {
2007 if (!BlocksContainingLVI.insert(BB).second)
2008 return;
2009 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2010 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2011 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2012 BB->printAsOperand(OS, false);
2013 OS << "' is: " << Result << "\n";
2014 };
2015
2016 printResult(ParentBB);
2017 // Print the LVI analysis results for the immediate successor blocks that
2018 // are dominated by `ParentBB`.
2019 for (const auto *BBSucc : successors(ParentBB))
2020 if (DT.dominates(ParentBB, BBSucc))
2021 printResult(BBSucc);
2022
2023 // Print LVI in blocks where `I` is used.
2024 for (const auto *U : I->users())
2025 if (auto *UseI = dyn_cast<Instruction>(U))
2026 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2027 printResult(UseI->getParent());
2028
2029}
2030
2033 OS << "LVI for function '" << F.getName() << "':\n";
2034 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2035 auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2036 LVI.printLVI(F, DTree, OS);
2037 return PreservedAnalyses::all();
2038}