LLVM 23.0.0git
SCCPSolver.cpp
Go to the documentation of this file.
1//===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// \file
10// This file implements the Sparse Conditional Constant Propagation (SCCP)
11// utility.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/SetVector.h"
24#include "llvm/IR/IRBuilder.h"
25#include "llvm/IR/InstVisitor.h"
27#include "llvm/IR/NoFolder.h"
30#include "llvm/Support/Debug.h"
34#include <cassert>
35#include <utility>
36#include <vector>
37
38using namespace llvm;
39using namespace PatternMatch;
40
41#define DEBUG_TYPE "sccp"
42
// The maximum number of range extensions allowed for operations requiring
// widening.
// Consumed via MergeOptions (see the getMaxWidenStepsOpts() helper below) to
// bound how often a constant-range lattice value may be widened.
static const unsigned MaxNumRangeExtensions = 10;
46
47/// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
52
53namespace llvm {
54
56 return LV.isConstant() ||
58}
59
63
// NOTE(review): the signature line of this function is missing from this dump
// (from the body it takes a Value *V and returns bool — confirm against the
// original file). Attempts to RAUW V with its lattice constant.
  Constant *Const = getConstantOrNull(V);
  if (!Const)
    return false;
  // Replacing `musttail` instructions with constant breaks `musttail` invariant
  // unless the call itself can be removed.
  // Calls with "clang.arc.attachedcall" implicitly use the return value and
  // those uses cannot be updated with a constant.
  // NOTE(review): the declaration of CB is missing from this dump — presumably
  // a dyn_cast<CallBase>(V). Confirm against the original file.
  if (CB && ((CB->isMustTailCall() && !wouldInstructionBeTriviallyDead(CB)) ||
  // NOTE(review): the rest of this condition and the declaration of F are
  // missing from this dump.

    // Don't zap returns of the callee
    if (F)
  // NOTE(review): line missing from this dump — presumably records F's returns
  // as must-preserve.

    LLVM_DEBUG(dbgs() << " Can\'t treat the result of call " << *CB
                      << " as a constant\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');

  // Replaces all of the uses of a variable with uses of the constant.
  V->replaceAllUsesWith(Const);
  return true;
}
92
/// Helper for getting ranges from \p Solver. Instructions inserted during
/// simplification are unavailable in the solver, so we return a full range for
/// them.
// NOTE(review): the first line of this helper's signature is missing from this
// dump (presumably: static ConstantRange getRange(Value *Op, SCCPSolver
// &Solver, ...)). Confirm against the original file.
                 const SmallPtrSetImpl<Value *> &InsertedValues) {
  // Constants carry their own exact range.
  if (auto *Const = dyn_cast<Constant>(Op))
    return Const->toConstantRange();
  // Values created by the simplifier have no lattice entry; be conservative
  // and return the full range of the type.
  if (InsertedValues.contains(Op)) {
    unsigned Bitwidth = Op->getType()->getScalarSizeInBits();
    return ConstantRange::getFull(Bitwidth);
  }
  // Otherwise ask the solver for the tracked lattice range.
  return Solver.getLatticeValueFor(Op).asConstantRange(Op->getType(),
                                                       /*UndefAllowed=*/false);
}
107
/// Try to use \p Inst's value range from \p Solver to infer poison-generating
/// flags: nuw/nsw on overflowing binary ops and trunc, nneg on zext/uitofp,
/// and nuw on nusw GEPs. Returns true if any flag was added.
static bool refineInstruction(SCCPSolver &Solver,
                              const SmallPtrSetImpl<Value *> &InsertedValues,
                              Instruction &Inst) {
  bool Changed = false;
  // Shorthand for looking up an operand's lattice range.
  auto GetRange = [&Solver, &InsertedValues](Value *Op) {
    return getRange(Op, Solver, InsertedValues);
  };

  // NOTE(review): a line is missing from this dump here — presumably the
  // opening check that Inst is an overflowing binary operator, whose block is
  // closed by the "} else if" chain below. Confirm against the original file.
    // Both wrap flags already present; nothing left to infer.
    if (Inst.hasNoSignedWrap() && Inst.hasNoUnsignedWrap())
      return false;

    auto RangeA = GetRange(Inst.getOperand(0));
    auto RangeB = GetRange(Inst.getOperand(1));
    if (!Inst.hasNoUnsignedWrap()) {
      // NOTE(review): missing line — presumably computes NUWRange, the region
      // guaranteed not to wrap unsigned for this opcode and RangeB.
          Instruction::BinaryOps(Inst.getOpcode()), RangeB,
      // NOTE(review): missing line — presumably the trailing argument of that
      // call.
      if (NUWRange.contains(RangeA)) {
        // NOTE(review): missing line — presumably sets the NUW flag on Inst.
        Changed = true;
      }
    }
    if (!Inst.hasNoSignedWrap()) {
      // NOTE(review): missing line — presumably computes NSWRange, the region
      // guaranteed not to wrap signed for this opcode and RangeB.
          Instruction::BinaryOps(Inst.getOpcode()), RangeB,
      // NOTE(review): missing line — presumably the trailing argument of that
      // call.
      if (NSWRange.contains(RangeA)) {
        Inst.setHasNoSignedWrap();
        Changed = true;
      }
    }
  } else if (isa<PossiblyNonNegInst>(Inst) && !Inst.hasNonNeg()) {
    // A provably non-negative operand lets us set the nneg flag.
    auto Range = GetRange(Inst.getOperand(0));
    if (Range.isAllNonNegative()) {
      Inst.setNonNeg();
      Changed = true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(&Inst)) {
    // Both trunc wrap flags already present; nothing left to infer.
    if (TI->hasNoSignedWrap() && TI->hasNoUnsignedWrap())
      return false;

    auto Range = GetRange(Inst.getOperand(0));
    uint64_t DestWidth = TI->getDestTy()->getScalarSizeInBits();
    // nuw: the value fits in DestWidth bits unsigned, so no set bits are lost.
    if (!TI->hasNoUnsignedWrap()) {
      if (Range.getActiveBits() <= DestWidth) {
        TI->setHasNoUnsignedWrap(true);
        Changed = true;
      }
    }
    // nsw: the value fits in DestWidth bits as a signed quantity.
    if (!TI->hasNoSignedWrap()) {
      if (Range.getMinSignedBits() <= DestWidth) {
        TI->setHasNoSignedWrap(true);
        Changed = true;
      }
    }
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&Inst)) {
    // Only a GEP that is nusw but not yet nuw can be upgraded here.
    if (GEP->hasNoUnsignedWrap() || !GEP->hasNoUnsignedSignedWrap())
      return false;

    // nusw + all non-negative indices implies nuw.
    if (all_of(GEP->indices(),
               [&](Value *V) { return GetRange(V).isAllNonNegative(); })) {
      // NOTE(review): the flag operand of this expression is missing from this
      // dump — presumably GEPNoWrapFlags::noUnsignedWrap().
      GEP->setNoWrapFlags(GEP->getNoWrapFlags() |
      Changed = true;
    }
  }

  return Changed;
}
179
180/// Try to replace signed instructions with their unsigned equivalent.
181static bool replaceSignedInst(SCCPSolver &Solver,
182 SmallPtrSetImpl<Value *> &InsertedValues,
183 Instruction &Inst) {
184 // Determine if a signed value is known to be >= 0.
185 auto isNonNegative = [&Solver, &InsertedValues](Value *V) {
186 return getRange(V, Solver, InsertedValues).isAllNonNegative();
187 };
188
189 Instruction *NewInst = nullptr;
190 switch (Inst.getOpcode()) {
191 case Instruction::SIToFP:
192 case Instruction::SExt: {
193 // If the source value is not negative, this is a zext/uitofp.
194 Value *Op0 = Inst.getOperand(0);
195 if (!isNonNegative(Op0))
196 return false;
197 NewInst = CastInst::Create(Inst.getOpcode() == Instruction::SExt
198 ? Instruction::ZExt
199 : Instruction::UIToFP,
200 Op0, Inst.getType(), "", Inst.getIterator());
201 NewInst->setNonNeg();
202 break;
203 }
204 case Instruction::AShr: {
205 // If the shifted value is not negative, this is a logical shift right.
206 Value *Op0 = Inst.getOperand(0);
207 if (!isNonNegative(Op0))
208 return false;
209 NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", Inst.getIterator());
210 NewInst->setIsExact(Inst.isExact());
211 break;
212 }
213 case Instruction::SDiv:
214 case Instruction::SRem: {
215 // If both operands are not negative, this is the same as udiv/urem.
216 Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
217 if (!isNonNegative(Op0) || !isNonNegative(Op1))
218 return false;
219 auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
220 : Instruction::URem;
221 NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "", Inst.getIterator());
222 if (Inst.getOpcode() == Instruction::SDiv)
223 NewInst->setIsExact(Inst.isExact());
224 break;
225 }
226 default:
227 return false;
228 }
229
230 // Wire up the new instruction and update state.
231 assert(NewInst && "Expected replacement instruction");
232 NewInst->takeName(&Inst);
233 InsertedValues.insert(NewInst);
234 Inst.replaceAllUsesWith(NewInst);
235 NewInst->setDebugLoc(Inst.getDebugLoc());
236 Solver.removeLatticeValueFor(&Inst);
237 Inst.eraseFromParent();
238 return true;
239}
240
/// Try to use \p Inst's value range from \p Solver to simplify it. Returns a
/// replacement value (possibly a newly inserted instruction, recorded in
/// \p InsertedValues) or nullptr if no simplification applies.
// NOTE(review): the first line of this function's signature is missing from
// this dump (presumably: static Value *simplifyInstruction(SCCPSolver
// &Solver, ...)). Confirm against the original file.
                                  SmallPtrSetImpl<Value *> &InsertedValues,
                                  Instruction &Inst) {
  // Shorthand for looking up an operand's lattice range.
  auto GetRange = [&Solver, &InsertedValues](Value *Op) {
    return getRange(Op, Solver, InsertedValues);
  };

  Value *X;
  const APInt *RHSC;
  // Remove masking operations: if X's range already fits under the low-bit
  // mask, the 'and' is a no-op and X can be used directly.
  if (match(&Inst, m_And(m_Value(X), m_LowBitMask(RHSC)))) {
    ConstantRange LRange = GetRange(X);
    if (LRange.getUnsignedMax().ule(*RHSC))
      return X;
  }

  // Check if we can simplify [us]cmp(X, Y) to X - Y.
  if (auto *Cmp = dyn_cast<CmpIntrinsic>(&Inst)) {
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
    // Bail out on 1-bit comparisons.
    if (BitWidth == 1)
      return nullptr;
    // Only profitable/provable when each operand spans at most 3 values, so
    // the difference is confined to {-1, 0, 1}.
    ConstantRange LRange = GetRange(LHS);
    if (LRange.isSizeLargerThan(3))
      return nullptr;
    ConstantRange RRange = GetRange(RHS);
    if (RRange.isSizeLargerThan(3))
      return nullptr;
    ConstantRange RHSLower = RRange.sub(APInt(BitWidth, 1));
    ConstantRange RHSUpper = RRange.add(APInt(BitWidth, 1));
    // NOTE(review): the declaration line of Pred is missing from this dump —
    // presumably an ICmpInst::Predicate initialized by the expression below.
        Cmp->isSigned() ? CmpInst::ICMP_SLE : CmpInst::ICMP_ULE;
    if (!RHSLower.icmp(Pred, LRange) || !LRange.icmp(Pred, RHSUpper))
      return nullptr;

    // NoFolder keeps the sub as a real instruction so it can be recorded in
    // InsertedValues.
    IRBuilder<NoFolder> Builder(&Inst);
    Value *Sub = Builder.CreateSub(LHS, RHS, Inst.getName(), /*HasNUW=*/false,
                                   /*HasNSW=*/Cmp->isSigned());
    InsertedValues.insert(Sub);
    if (Sub->getType() != Inst.getType()) {
      Sub = Builder.CreateSExtOrTrunc(Sub, Inst.getType());
      InsertedValues.insert(Sub);
    }
    return Sub;
  }

  // Relax range checks.
  if (auto *ICmp = dyn_cast<ICmpInst>(&Inst)) {
    Value *X;
    // Match a compare against a constant (optionally through an offset add or
    // a negated-power-of-two mask) and return the exact range of X for which
    // the compare is true.
    auto MatchTwoInstructionExactRangeCheck =
        [&]() -> std::optional<ConstantRange> {
      const APInt *RHSC;
      if (!match(ICmp->getOperand(1), m_APInt(RHSC)))
        return std::nullopt;

      Value *LHS = ICmp->getOperand(0);
      ICmpInst::Predicate Pred = ICmp->getPredicate();
      const APInt *Offset;
      // NOTE(review): a line is missing from this dump here — presumably the
      // match of LHS as an add of X and Offset guarding the return below.
        return ConstantRange::makeExactICmpRegion(Pred, *RHSC).sub(*Offset);
      // Match icmp eq/ne X & NegPow2, C
      if (ICmp->isEquality()) {
        const APInt *Mask;
        if (match(LHS, m_OneUse(m_And(m_Value(X), m_NegatedPower2(Mask)))) &&
            RHSC->countr_zero() >= Mask->countr_zero()) {
          ConstantRange CR(*RHSC, *RHSC - *Mask);
          return Pred == ICmpInst::ICMP_EQ ? CR : CR.inverse();
        }
      }
      return std::nullopt;
    };

    if (auto CR = MatchTwoInstructionExactRangeCheck()) {
      ConstantRange LRange = GetRange(X);
      // Early exit if we know nothing about X.
      if (LRange.isFullSet())
        return nullptr;
      // Try to express a candidate range as a single icmp of X against a
      // constant.
      auto ConvertCRToICmp =
          [&](const std::optional<ConstantRange> &NewCR) -> Value * {
        // NOTE(review): the declaration line of Pred is missing from this
        // dump — presumably a CmpInst::Predicate out-parameter.
        APInt RHS;
        // Check if we can represent NewCR as an icmp predicate.
        if (NewCR && NewCR->getEquivalentICmp(Pred, RHS)) {
          IRBuilder<NoFolder> Builder(&Inst);
          Value *NewICmp =
              Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS));
          InsertedValues.insert(NewICmp);
          return NewICmp;
        }
        return nullptr;
      };
      // We are allowed to refine the comparison to either true or false for out
      // of range inputs.
      // Here we refine the comparison to false, and check if we can narrow the
      // range check to a simpler test.
      if (auto *V = ConvertCRToICmp(CR->exactIntersectWith(LRange)))
        return V;
      // Here we refine the comparison to true, i.e. we relax the range check.
      if (auto *V = ConvertCRToICmp(CR->exactUnionWith(LRange.inverse())))
        return V;
    }
  }

  return nullptr;
}
349
// NOTE(review): the first line of this member function's signature is missing
// from this dump (presumably: bool SCCPSolver::simplifyInstsInBlock(BasicBlock
// &BB, ...)). Applies, in priority order, constant folding, signed->unsigned
// rewriting, flag refinement, and range-based simplification to every
// value-producing instruction in BB; returns true if anything changed.
                                     SmallPtrSetImpl<Value *> &InsertedValues,
                                     Statistic &InstRemovedStat,
                                     Statistic &InstReplacedStat) {
  bool MadeChanges = false;
  // Early-increment iteration so instructions can be erased while walking.
  for (Instruction &Inst : make_early_inc_range(BB)) {
    // Void-producing instructions have no value to replace.
    if (Inst.getType()->isVoidTy())
      continue;
    if (tryToReplaceWithConstant(&Inst)) {
      // NOTE(review): a line is missing from this dump here — presumably a
      // condition guarding the erase below.
        Inst.eraseFromParent();

      MadeChanges = true;
      ++InstRemovedStat;
    } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
      // Rewrote a signed operation as its unsigned equivalent.
      MadeChanges = true;
      ++InstReplacedStat;
    } else if (refineInstruction(*this, InsertedValues, Inst)) {
      // Only flags were added; the instruction itself stays.
      MadeChanges = true;
    } else if (auto *V = simplifyInstruction(*this, InsertedValues, Inst)) {
      Inst.replaceAllUsesWith(V);
      Inst.eraseFromParent();
      ++InstRemovedStat;
      MadeChanges = true;
    }
  }
  return MadeChanges;
}
378
380 BasicBlock *&NewUnreachableBB) const {
381 SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
382 bool HasNonFeasibleEdges = false;
383 for (BasicBlock *Succ : successors(BB)) {
384 if (isEdgeFeasible(BB, Succ))
385 FeasibleSuccessors.insert(Succ);
386 else
387 HasNonFeasibleEdges = true;
388 }
389
390 // All edges feasible, nothing to do.
391 if (!HasNonFeasibleEdges)
392 return false;
393
394 // SCCP can only determine non-feasible edges for br, switch and indirectbr.
395 Instruction *TI = BB->getTerminator();
397 "Terminator must be a br, switch or indirectbr");
398
399 if (FeasibleSuccessors.size() == 0) {
400 // Branch on undef/poison, replace with unreachable.
403 for (BasicBlock *Succ : successors(BB)) {
404 Succ->removePredecessor(BB);
405 if (SeenSuccs.insert(Succ).second)
406 Updates.push_back({DominatorTree::Delete, BB, Succ});
407 }
408 TI->eraseFromParent();
409 new UnreachableInst(BB->getContext(), BB);
410 DTU.applyUpdatesPermissive(Updates);
411 } else if (FeasibleSuccessors.size() == 1) {
412 // Replace with an unconditional branch to the only feasible successor.
413 BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
415 bool HaveSeenOnlyFeasibleSuccessor = false;
416 for (BasicBlock *Succ : successors(BB)) {
417 if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
418 // Don't remove the edge to the only feasible successor the first time
419 // we see it. We still do need to remove any multi-edges to it though.
420 HaveSeenOnlyFeasibleSuccessor = true;
421 continue;
422 }
423
424 Succ->removePredecessor(BB);
425 Updates.push_back({DominatorTree::Delete, BB, Succ});
426 }
427
428 Instruction *BI = UncondBrInst::Create(OnlyFeasibleSuccessor, BB);
429 BI->setDebugLoc(TI->getDebugLoc());
430 TI->eraseFromParent();
431 DTU.applyUpdatesPermissive(Updates);
432 } else if (FeasibleSuccessors.size() > 1) {
435
436 // If the default destination is unfeasible it will never be taken. Replace
437 // it with a new block with a single Unreachable instruction.
438 BasicBlock *DefaultDest = SI->getDefaultDest();
439 if (!FeasibleSuccessors.contains(DefaultDest)) {
440 if (!NewUnreachableBB) {
441 NewUnreachableBB =
442 BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
443 DefaultDest->getParent(), DefaultDest);
444 auto *UI =
445 new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
446 UI->setDebugLoc(DebugLoc::getTemporary());
447 }
448
449 DefaultDest->removePredecessor(BB);
450 SI->setDefaultDest(NewUnreachableBB);
451 Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
452 Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
453 }
454
455 for (auto CI = SI->case_begin(); CI != SI->case_end();) {
456 if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
457 ++CI;
458 continue;
459 }
460
461 BasicBlock *Succ = CI->getCaseSuccessor();
462 Succ->removePredecessor(BB);
463 Updates.push_back({DominatorTree::Delete, BB, Succ});
464 SI.removeCase(CI);
465 // Don't increment CI, as we removed a case.
466 }
467
468 DTU.applyUpdatesPermissive(Updates);
469 } else {
470 llvm_unreachable("Must have at least one feasible successor");
471 }
472 return true;
473}
474
/// Attach attributes implied by the lattice value \p Val to function \p F at
/// attribute index \p AttrIndex (return value or a specific argument).
static void inferAttribute(Function *F, unsigned AttrIndex,
                           const ValueLatticeElement &Val) {
  // If there is a known constant range for the value, add range attribute.
  if (Val.isConstantRange() && !Val.getConstantRange().isSingleElement()) {
    // Do not add range attribute if the value may include undef.
    // NOTE(review): the condition line of this early return is missing from
    // this dump — presumably a check that the range may include undef.
      return;

    // Take the intersection of the existing attribute and the inferred range.
    Attribute OldAttr = F->getAttributeAtIndex(AttrIndex, Attribute::Range);
    // NOTE(review): the declaration of CR (the inferred ConstantRange) is
    // missing from this dump.
    if (OldAttr.isValid())
      CR = CR.intersectWith(OldAttr.getRange());
    F->addAttributeAtIndex(
        AttrIndex, Attribute::get(F->getContext(), Attribute::Range, CR));
    return;
  }
  // Infer nonnull attribute: the lattice proves the pointer is never null and
  // the attribute is not already present.
  if (Val.isNotConstant() && Val.getNotConstant()->getType()->isPointerTy() &&
      Val.getNotConstant()->isNullValue() &&
      !F->hasAttributeAtIndex(AttrIndex, Attribute::NonNull)) {
    F->addAttributeAtIndex(AttrIndex,
                           Attribute::get(F->getContext(), Attribute::NonNull));
  }
}
500
502 for (const auto &[F, ReturnValue] : getTrackedRetVals())
503 inferAttribute(F, AttributeList::ReturnIndex, ReturnValue);
504}
505
508 if (!isBlockExecutable(&F->front()))
509 continue;
510 for (Argument &A : F->args())
511 if (!A.getType()->isStructTy())
512 inferAttribute(F, AttributeList::FirstArgIndex + A.getArgNo(),
514 }
515}
516
517/// Helper class for SCCPSolver. This implements the instruction visitor and
518/// holds all the state.
519class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
520 const DataLayout &DL;
521 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
522 /// Basic blocks that are executable (but may not have been visited yet).
523 SmallPtrSet<BasicBlock *, 8> BBExecutable;
524 /// Basic blocks that are executable and have been visited at least once.
527 ValueState; // The state each value is in.
528
529 /// StructValueState - This maintains ValueState for values that have
530 /// StructType, for example for formal arguments, calls, insertelement, etc.
532
533 /// GlobalValue - If we are tracking any values for the contents of a global
534 /// variable, we keep a mapping from the constant accessor to the element of
535 /// the global, to the currently known value. If the value becomes
536 /// overdefined, it's entry is simply removed from this map.
538
539 /// TrackedRetVals - If we are tracking arguments into and the return
540 /// value out of a function, it will have an entry in this map, indicating
541 /// what the known return value for the function is.
543
544 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
545 /// that return multiple values.
547 TrackedMultipleRetVals;
548
549 /// The set of values whose lattice has been invalidated.
550 /// Populated by resetLatticeValueFor(), cleared after resolving undefs.
551 DenseSet<Value *> Invalidated;
552
553 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
554 /// represented here for efficient lookup.
555 SmallPtrSet<Function *, 16> MRVFunctionsTracked;
556
557 /// A list of functions whose return cannot be modified.
558 SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
559
560 /// TrackingIncomingArguments - This is the set of functions for whose
561 /// arguments we make optimistic assumptions about and try to prove as
562 /// constants.
563 SmallPtrSet<Function *, 16> TrackingIncomingArguments;
564
565 /// Worklist of instructions to re-visit. This only includes instructions
566 /// in blocks that have already been visited at least once.
568
569 /// Current instruction while visiting a block for the first time, used to
570 /// avoid unnecessary instruction worklist insertions. Null if an instruction
571 /// is visited outside a whole-block visitation.
572 Instruction *CurI = nullptr;
573
574 // The BasicBlock work list
576
577 /// KnownFeasibleEdges - Entries in this set are edges which have already had
578 /// PHI nodes retriggered.
579 using Edge = std::pair<BasicBlock *, BasicBlock *>;
580 DenseSet<Edge> KnownFeasibleEdges;
581
583
585
586 LLVMContext &Ctx;
587
588 BumpPtrAllocator PredicateInfoAllocator;
589
590private:
591 ConstantInt *getConstantInt(const ValueLatticeElement &IV, Type *Ty) const {
593 }
594
595 /// Push instruction \p I to the worklist.
596 void pushToWorkList(Instruction *I);
597
598 /// Push users of value \p V to the worklist.
599 void pushUsersToWorkList(Value *V);
600
601 /// Like pushUsersToWorkList(), but also prints a debug message with the
602 /// updated value.
603 void pushUsersToWorkListMsg(ValueLatticeElement &IV, Value *V);
604
605 // markConstant - Make a value be marked as "constant". If the value
606 // is not already a constant, add it to the instruction work list so that
607 // the users of the instruction are updated later.
608 bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
609 bool MayIncludeUndef = false);
610
611 bool markConstant(Value *V, Constant *C) {
612 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
613 return markConstant(ValueState[V], V, C);
614 }
615
616 bool markNotConstant(ValueLatticeElement &IV, Value *V, Constant *C);
617
618 bool markNotNull(ValueLatticeElement &IV, Value *V) {
619 return markNotConstant(IV, V, Constant::getNullValue(V->getType()));
620 }
621
622 /// markConstantRange - Mark the object as constant range with \p CR. If the
623 /// object is not a constant range with the range \p CR, add it to the
624 /// instruction work list so that the users of the instruction are updated
625 /// later.
626 bool markConstantRange(ValueLatticeElement &IV, Value *V,
627 const ConstantRange &CR);
628
629 // markOverdefined - Make a value be marked as "overdefined". If the
630 // value is not already overdefined, add it to the overdefined instruction
631 // work list so that the users of the instruction are updated later.
632 bool markOverdefined(ValueLatticeElement &IV, Value *V);
633
634 /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
635 /// changes.
636 bool mergeInValue(ValueLatticeElement &IV, Value *V,
637 const ValueLatticeElement &MergeWithV,
639 /*MayIncludeUndef=*/false, /*CheckWiden=*/false});
640
641 /// getValueState - Return the ValueLatticeElement object that corresponds to
642 /// the value. This function handles the case when the value hasn't been seen
643 /// yet by properly seeding constants etc.
644 ValueLatticeElement &getValueState(Value *V) {
645 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
646
647 auto I = ValueState.try_emplace(V);
648 ValueLatticeElement &LV = I.first->second;
649
650 if (!I.second)
651 return LV; // Common case, already in the map.
652
653 if (auto *C = dyn_cast<Constant>(V))
654 LV.markConstant(C); // Constants are constant
655
656 // All others are unknown by default.
657 return LV;
658 }
659
660 /// getStructValueState - Return the ValueLatticeElement object that
661 /// corresponds to the value/field pair. This function handles the case when
662 /// the value hasn't been seen yet by properly seeding constants etc.
663 ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
664 assert(V->getType()->isStructTy() && "Should use getValueState");
665 assert(i < cast<StructType>(V->getType())->getNumElements() &&
666 "Invalid element #");
667
668 auto I = StructValueState.insert(
669 std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
670 ValueLatticeElement &LV = I.first->second;
671
672 if (!I.second)
673 return LV; // Common case, already in the map.
674
675 if (auto *C = dyn_cast<Constant>(V)) {
676 Constant *Elt = C->getAggregateElement(i);
677
678 if (!Elt)
679 LV.markOverdefined(); // Unknown sort of constant.
680 else
681 LV.markConstant(Elt); // Constants are constant.
682 }
683
684 // All others are underdefined by default.
685 return LV;
686 }
687
  /// Traverse the use-def chain of \p Call, marking itself and its users as
  /// "unknown" on the way.
  void invalidate(CallBase *Call) {
    // NOTE(review): the declaration of the ToInvalidate worklist is missing
    // from this dump.
    ToInvalidate.push_back(Call);

    while (!ToInvalidate.empty()) {
      Instruction *Inst = ToInvalidate.pop_back_val();

      // Each instruction is invalidated at most once.
      if (!Invalidated.insert(Inst).second)
        continue;

      // Instructions in non-executable blocks carry no lattice state to reset.
      if (!BBExecutable.count(Inst->getParent()))
        continue;

      Value *V = nullptr;
      // For return instructions we need to invalidate the tracked returns map.
      // Anything else has its lattice in the value map.
      if (auto *RetInst = dyn_cast<ReturnInst>(Inst)) {
        Function *F = RetInst->getParent()->getParent();
        if (auto It = TrackedRetVals.find(F); It != TrackedRetVals.end()) {
          It->second = ValueLatticeElement();
          V = F;
        } else if (MRVFunctionsTracked.count(F)) {
          // Multi-return-value functions keep one entry per struct element.
          auto *STy = cast<StructType>(F->getReturnType());
          for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
            TrackedMultipleRetVals[{F, I}] = ValueLatticeElement();
          V = F;
        }
      } else if (auto *STy = dyn_cast<StructType>(Inst->getType())) {
        // Struct-typed results keep one lattice entry per element.
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          if (auto It = StructValueState.find({Inst, I});
              It != StructValueState.end()) {
            It->second = ValueLatticeElement();
            V = Inst;
          }
        }
      } else if (auto It = ValueState.find(Inst); It != ValueState.end()) {
        It->second = ValueLatticeElement();
        V = Inst;
      }

      // If any lattice entry was actually reset, propagate the invalidation
      // to both direct users and registered additional users.
      if (V) {
        LLVM_DEBUG(dbgs() << "Invalidated lattice for " << *V << "\n");

        for (User *U : V->users())
          if (auto *UI = dyn_cast<Instruction>(U))
            ToInvalidate.push_back(UI);

        auto It = AdditionalUsers.find(V);
        if (It != AdditionalUsers.end())
          for (User *U : It->second)
            if (auto *UI = dyn_cast<Instruction>(U))
              ToInvalidate.push_back(UI);
      }
    }
  }
745
746 /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
747 /// work list if it is not already executable.
748 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
749
750 // getFeasibleSuccessors - Return a vector of booleans to indicate which
751 // successors are reachable from a given terminator instruction.
752 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
753
754 // Add U as additional user of V.
755 void addAdditionalUser(Value *V, User *U) { AdditionalUsers[V].insert(U); }
756
757 void handlePredicate(Instruction *I, Value *CopyOf, const PredicateBase *PI);
758 void handleCallOverdefined(CallBase &CB);
759 void handleCallResult(CallBase &CB);
760 void handleCallArguments(CallBase &CB);
761 void handleExtractOfWithOverflow(ExtractValueInst &EVI,
762 const WithOverflowInst *WO, unsigned Idx);
763 bool isInstFullyOverDefined(Instruction &Inst);
764
765private:
766 friend class InstVisitor<SCCPInstVisitor>;
767
768 // visit implementations - Something changed in this instruction. Either an
769 // operand made a transition, or the instruction is newly executable. Change
770 // the value type of I to reflect these changes if appropriate.
771 void visitPHINode(PHINode &I);
772
773 // Terminators
774
775 void visitReturnInst(ReturnInst &I);
776 void visitTerminator(Instruction &TI);
777
778 void visitCastInst(CastInst &I);
779 void visitSelectInst(SelectInst &I);
780 void visitUnaryOperator(Instruction &I);
781 void visitFreezeInst(FreezeInst &I);
782 void visitBinaryOperator(Instruction &I);
783 void visitCmpInst(CmpInst &I);
784 void visitExtractValueInst(ExtractValueInst &EVI);
785 void visitInsertValueInst(InsertValueInst &IVI);
786
787 void visitCatchSwitchInst(CatchSwitchInst &CPI) {
788 markOverdefined(&CPI);
789 visitTerminator(CPI);
790 }
791
792 // Instructions that cannot be folded away.
793
794 void visitStoreInst(StoreInst &I);
795 void visitLoadInst(LoadInst &I);
796 void visitGetElementPtrInst(GetElementPtrInst &I);
797 void visitAllocaInst(AllocaInst &AI);
798
799 void visitInvokeInst(InvokeInst &II) {
800 visitCallBase(II);
801 visitTerminator(II);
802 }
803
804 void visitCallBrInst(CallBrInst &CBI) {
805 visitCallBase(CBI);
806 visitTerminator(CBI);
807 }
808
809 void visitCallBase(CallBase &CB);
810 void visitResumeInst(ResumeInst &I) { /*returns void*/
811 }
812 void visitUnreachableInst(UnreachableInst &I) { /*returns void*/
813 }
814 void visitFenceInst(FenceInst &I) { /*returns void*/
815 }
816
817 void visitInstruction(Instruction &I);
818
819public:
821 FnPredicateInfo.insert({&F, std::make_unique<PredicateInfo>(
822 F, DT, AC, PredicateInfoAllocator)});
823 }
824
826 auto It = FnPredicateInfo.find(&F);
827 if (It == FnPredicateInfo.end())
828 return;
829
830 for (BasicBlock &BB : F) {
831 for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
832 if (auto *BC = dyn_cast<BitCastInst>(&Inst)) {
833 if (BC->getType() == BC->getOperand(0)->getType()) {
834 if (It->second->getPredicateInfoFor(&Inst)) {
835 Value *Op = BC->getOperand(0);
836 Inst.replaceAllUsesWith(Op);
837 Inst.eraseFromParent();
838 }
839 }
840 }
841 }
842 }
843 }
844
845 void visitCallInst(CallInst &I) { visitCallBase(I); }
846
848
850 auto It = FnPredicateInfo.find(I->getParent()->getParent());
851 if (It == FnPredicateInfo.end())
852 return nullptr;
853 return It->second->getPredicateInfoFor(I);
854 }
855
857 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
858 LLVMContext &Ctx)
859 : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}
860
862 // We only track the contents of scalar globals.
863 if (GV->getValueType()->isSingleValueType()) {
864 ValueLatticeElement &IV = TrackedGlobals[GV];
865 IV.markConstant(GV->getInitializer());
866 }
867 }
868
870 // Add an entry, F -> undef.
871 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
872 MRVFunctionsTracked.insert(F);
873 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
874 TrackedMultipleRetVals.try_emplace(std::make_pair(F, i));
875 } else if (!F->getReturnType()->isVoidTy())
876 TrackedRetVals.try_emplace(F);
877 }
878
880 MustPreserveReturnsInFunctions.insert(F);
881 }
882
884 return MustPreserveReturnsInFunctions.count(F);
885 }
886
888 TrackingIncomingArguments.insert(F);
889 }
890
892 return TrackingIncomingArguments.count(F);
893 }
894
896 return TrackingIncomingArguments;
897 }
898
899 void solve();
900
902
904
906 return BBExecutable.count(BB);
907 }
908
909 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;
910
911 std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
912 std::vector<ValueLatticeElement> StructValues;
913 auto *STy = dyn_cast<StructType>(V->getType());
914 assert(STy && "getStructLatticeValueFor() can be called only on structs");
915 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
916 auto I = StructValueState.find(std::make_pair(V, i));
917 assert(I != StructValueState.end() && "Value not in valuemap!");
918 StructValues.push_back(I->second);
919 }
920 return StructValues;
921 }
922
923 void removeLatticeValueFor(Value *V) { ValueState.erase(V); }
924
925 /// Invalidate the Lattice Value of \p Call and its users after specializing
926 /// the call. Then recompute it.
928 // Calls to void returning functions do not need invalidation.
929 Function *F = Call->getCalledFunction();
930 (void)F;
931 assert(!F->getReturnType()->isVoidTy() &&
932 (TrackedRetVals.count(F) || MRVFunctionsTracked.count(F)) &&
933 "All non void specializations should be tracked");
934 invalidate(Call);
935 handleCallResult(*Call);
936 }
937
939 assert(!V->getType()->isStructTy() &&
940 "Should use getStructLatticeValueFor");
941 auto I = ValueState.find(V);
942 assert(I != ValueState.end() &&
943 "V not found in ValueState nor Paramstate map!");
944 return I->second;
945 }
946
948 return TrackedRetVals;
949 }
950
953 return TrackedGlobals;
954 }
955
957 return MRVFunctionsTracked;
958 }
959
961 if (auto *STy = dyn_cast<StructType>(V->getType()))
962 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
963 markOverdefined(getStructValueState(V, i), V);
964 else
965 markOverdefined(ValueState[V], V);
966 }
967
969 if (A->getType()->isIntOrIntVectorTy()) {
970 if (std::optional<ConstantRange> Range = A->getRange())
972 }
973 if (A->hasNonNullAttr())
975 // Assume nothing about the incoming arguments without attributes.
977 }
978
980 if (A->getType()->isStructTy())
981 return (void)markOverdefined(A);
982 mergeInValue(ValueState[A], A, getArgAttributeVL(A));
983 }
984
986
987 Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;
988
990
992 const SmallVectorImpl<ArgInfo> &Args);
993
995 for (auto &BB : *F)
996 BBExecutable.erase(&BB);
997 }
998
1000 bool ResolvedUndefs = true;
1001 while (ResolvedUndefs) {
1002 solve();
1003 ResolvedUndefs = false;
1004 for (Function &F : M)
1005 ResolvedUndefs |= resolvedUndefsIn(F);
1006 }
1007 }
1008
1010 bool ResolvedUndefs = true;
1011 while (ResolvedUndefs) {
1012 solve();
1013 ResolvedUndefs = false;
1014 for (Function *F : WorkList)
1015 ResolvedUndefs |= resolvedUndefsIn(*F);
1016 }
1017 }
1018
1020 bool ResolvedUndefs = true;
1021 while (ResolvedUndefs) {
1022 solve();
1023 ResolvedUndefs = false;
1024 for (Value *V : Invalidated)
1025 if (auto *I = dyn_cast<Instruction>(V))
1026 ResolvedUndefs |= resolvedUndef(*I);
1027 }
1028 Invalidated.clear();
1029 }
1030};
1031
1032} // namespace llvm
1033
1035 if (!BBExecutable.insert(BB).second)
1036 return false;
1037 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
1038 BBWorkList.push_back(BB); // Add the block to the work list!
1039 return true;
1040}
1041
1042void SCCPInstVisitor::pushToWorkList(Instruction *I) {
1043 // If we're currently visiting a block, do not push any instructions in the
1044 // same blocks that are after the current one, as they will be visited
1045 // anyway. We do have to push updates to earlier instructions (e.g. phi
1046 // nodes or loads of tracked globals).
1047 if (CurI && I->getParent() == CurI->getParent() && !I->comesBefore(CurI))
1048 return;
1049 // Only push instructions in already visited blocks. Otherwise we'll handle
1050 // it when we visit the block for the first time.
1051 if (BBVisited.contains(I->getParent()))
1052 InstWorkList.insert(I);
1053}
1054
// Re-queue every instruction that consumes V: its IR users plus any
// registered additional users (e.g. instructions depending on V via
// predicate info).
1055 void SCCPInstVisitor::pushUsersToWorkList(Value *V) {
1056 for (User *U : V->users())
1057 if (auto *UI = dyn_cast<Instruction>(U))
1058 pushToWorkList(UI);
1059
1060 auto Iter = AdditionalUsers.find(V);
1061 if (Iter != AdditionalUsers.end()) {
1062 // Copy additional users before notifying them of changes, because new
1063 // users may be added, potentially invalidating the iterator.
// NOTE(review): line 1064 (the declaration of ToNotify, presumably a
// SmallVector<Instruction *>) was dropped by the extraction.
1065 for (User *U : Iter->second)
1066 if (auto *UI = dyn_cast<Instruction>(U))
1067 ToNotify.push_back(UI);
1068 for (Instruction *UI : ToNotify)
1069 pushToWorkList(UI);
1070 }
1071 }
1072
// Debug-logging wrapper around pushUsersToWorkList: prints the updated
// lattice value IV for V before re-queueing V's users.
1073 void SCCPInstVisitor::pushUsersToWorkListMsg(ValueLatticeElement &IV,
1074 Value *V) {
1075 LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
1076 pushUsersToWorkList(V);
1077 }
1078
1079bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
1080 Constant *C, bool MayIncludeUndef) {
1081 if (!IV.markConstant(C, MayIncludeUndef))
1082 return false;
1083 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
1084 pushUsersToWorkList(V);
1085 return true;
1086}
1087
1088bool SCCPInstVisitor::markNotConstant(ValueLatticeElement &IV, Value *V,
1089 Constant *C) {
1090 if (!IV.markNotConstant(C))
1091 return false;
1092 LLVM_DEBUG(dbgs() << "markNotConstant: " << *C << ": " << *V << '\n');
1093 pushUsersToWorkList(V);
1094 return true;
1095}
1096
1097bool SCCPInstVisitor::markConstantRange(ValueLatticeElement &IV, Value *V,
1098 const ConstantRange &CR) {
1099 if (!IV.markConstantRange(CR))
1100 return false;
1101 LLVM_DEBUG(dbgs() << "markConstantRange: " << CR << ": " << *V << '\n');
1102 pushUsersToWorkList(V);
1103 return true;
1104}
1105
1106bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
1107 if (!IV.markOverdefined())
1108 return false;
1109
1110 LLVM_DEBUG(dbgs() << "markOverdefined: ";
1111 if (auto *F = dyn_cast<Function>(V)) dbgs()
1112 << "Function '" << F->getName() << "'\n";
1113 else dbgs() << *V << '\n');
1114 // Only instructions go on the work list
1115 pushUsersToWorkList(V);
1116 return true;
1117}
1118
// Returns true iff every tracked per-field return lattice value of F is a
// constant (or constant range with a single element).
// NOTE(review): signature line (1119) not visible in this dump — presumably
// `bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy)`.
1120 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1121 const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
1122 assert(It != TrackedMultipleRetVals.end());
1123 if (!SCCPSolver::isConstant(It->second))
1124 return false;
1125 }
1126 return true;
1127 }
1128
// Materialize the lattice value LV as a Constant of type Ty: a direct
// constant, or the single element of a singleton range; nullptr otherwise.
// NOTE(review): the first signature line (1129) is not visible in this dump.
1130 Type *Ty) const {
1131 if (LV.isConstant()) {
1132 Constant *C = LV.getConstant();
1133 assert(C->getType() == Ty && "Type mismatch");
1134 return C;
1135 }
1136
1137 if (LV.isConstantRange()) {
1138 const auto &CR = LV.getConstantRange();
// A range that collapses to one value is still a usable constant.
1139 if (CR.getSingleElement())
1140 return ConstantInt::get(Ty, *CR.getSingleElement());
1141 }
1142 return nullptr;
1143 }
1144
// Build a Constant for V from its (per-field, if struct-typed) lattice
// state; fields without a known constant become undef placeholders.
// NOTE(review): lines 1145 (signature), 1149 and 1161-1162 (overdefined
// bail-out conditions and the scalar lattice lookup) were dropped by the
// extraction; confirm the full logic upstream.
1146 Constant *Const = nullptr;
1147 if (V->getType()->isStructTy()) {
1148 std::vector<ValueLatticeElement> LVs = getStructLatticeValueFor(V);
1150 return nullptr;
1151 std::vector<Constant *> ConstVals;
1152 auto *ST = cast<StructType>(V->getType());
1153 for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
1154 const ValueLatticeElement &LV = LVs[I];
1155 ConstVals.push_back(SCCPSolver::isConstant(LV)
1156 ? getConstant(LV, ST->getElementType(I))
1157 : UndefValue::get(ST->getElementType(I)));
1158 }
1159 Const = ConstantStruct::get(ST, ConstVals);
1160 } else {
1163 return nullptr;
1164 Const = SCCPSolver::isConstant(LV) ? getConstant(LV, V->getType())
1165 : UndefValue::get(V->getType());
1166 }
1167 assert(Const && "Constant is nullptr here!");
1168 return Const;
1169 }
1170
// Seed the lattice state of the specialized function F's arguments: each
// argument listed in Args gets the corresponding actual constant; every
// other argument inherits the lattice state of the original function's
// matching argument. Args is assumed sorted in formal-argument order.
// NOTE(review): the first signature line (1171) is not visible in this dump.
1172 const SmallVectorImpl<ArgInfo> &Args) {
1173 assert(!Args.empty() && "Specialization without arguments");
1174 assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
1175 "Functions should have the same number of arguments");
1176
1177 auto Iter = Args.begin();
1178 Function::arg_iterator NewArg = F->arg_begin();
1179 Function::arg_iterator OldArg = Args[0].Formal->getParent()->arg_begin();
1180 for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {
1181
1182 LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
1183 << NewArg->getNameOrAsOperand() << "\n");
1184
1185 // Mark the argument constants in the new function
1186 // or copy the lattice state over from the old function.
1187 if (Iter != Args.end() && Iter->Formal == &*OldArg) {
// Struct-typed constants are split into their aggregate elements to match
// the per-field lattice tracking.
1188 if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
1189 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
1190 ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
1191 NewValue.markConstant(Iter->Actual->getAggregateElement(I));
1192 }
1193 } else {
1194 ValueState[&*NewArg].markConstant(Iter->Actual);
1195 }
1196 ++Iter;
1197 } else {
1198 if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
1199 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
1200 ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
1201 NewValue = StructValueState[{&*OldArg, I}];
1202 }
1203 } else {
1204 ValueLatticeElement &NewValue = ValueState[&*NewArg];
1205 NewValue = ValueState[&*OldArg];
1206 }
1207 }
1208 }
1209 }
1210
// Fallback visitor: any instruction without a dedicated handler cannot be
// reasoned about, so its result is conservatively overdefined.
1211 void SCCPInstVisitor::visitInstruction(Instruction &I) {
1212 // All the instructions we don't do any special handling for just
1213 // go to overdefined.
1214 LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
1215 markOverdefined(&I);
1216 }
1217
// Merge MergeWithV into IV (the lattice value of V) under the given merge
// options; on any change, re-queue V's users. Returns whether IV changed.
// NOTE(review): line 1220 (the trailing `MergeOptions Opts` parameter of
// the signature) was dropped by the extraction.
1218 bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
1219 const ValueLatticeElement &MergeWithV,
1221 if (IV.mergeIn(MergeWithV, Opts)) {
1222 pushUsersToWorkList(V);
1223 LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
1224 << IV << "\n");
1225 return true;
1226 }
1227 return false;
1228 }
1229
1230bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
1231 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
1232 return false; // This edge is already known to be executable!
1233
1234 if (!markBlockExecutable(Dest)) {
1235 // If the destination is already executable, we just made an *edge*
1236 // feasible that wasn't before. Revisit the PHI nodes in the block
1237 // because they have potentially new operands.
1238 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
1239 << " -> " << Dest->getName() << '\n');
1240
1241 for (PHINode &PN : Dest->phis())
1242 pushToWorkList(&PN);
1243 }
1244 return true;
1245}
1246
1247// getFeasibleSuccessors - Return a vector of booleans to indicate which
1248// successors are reachable from a given terminator instruction.
// Fill Succs (indexed like TI's successors) with true for every successor
// that is feasible given the current lattice state of TI's operands.
1249 void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
1250 SmallVectorImpl<bool> &Succs) {
1251 Succs.resize(TI.getNumSuccessors());
1252 if (isa<UncondBrInst>(TI)) {
1253 Succs[0] = true;
1254 return;
1255 }
1256
1257 if (auto *BI = dyn_cast<CondBrInst>(&TI)) {
1258 const ValueLatticeElement &BCValue = getValueState(BI->getCondition());
1259 ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
1260 if (!CI) {
1261 // Overdefined condition variables, and branches on unfoldable constant
1262 // conditions, mean the branch could go either way.
1263 if (!BCValue.isUnknownOrUndef())
1264 Succs[0] = Succs[1] = true;
1265 return;
1266 }
1267
1268 // Constant condition variables mean the branch can only go a single way.
// Successor 0 is the true edge, successor 1 the false edge, hence the
// isZero() index.
1269 Succs[CI->isZero()] = true;
1270 return;
1271 }
1272
1273 // We cannot analyze special terminators, so consider all successors
1274 // executable.
1275 if (TI.isSpecialTerminator()) {
1276 Succs.assign(TI.getNumSuccessors(), true);
1277 return;
1278 }
1279
1280 if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
1281 if (!SI->getNumCases()) {
1282 Succs[0] = true;
1283 return;
1284 }
1285 const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
1286 if (ConstantInt *CI =
1287 getConstantInt(SCValue, SI->getCondition()->getType())) {
1288 Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
1289 return;
1290 }
1291
1292 // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
1293 // is ready.
1294 if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
1295 const ConstantRange &Range = SCValue.getConstantRange();
1296 unsigned ReachableCaseCount = 0;
1297 for (const auto &Case : SI->cases()) {
1298 const APInt &CaseValue = Case.getCaseValue()->getValue();
1299 if (Range.contains(CaseValue)) {
1300 Succs[Case.getSuccessorIndex()] = true;
1301 ++ReachableCaseCount;
1302 }
1303 }
1304
// The default is only live if the range holds more values than the
// reachable explicit cases cover.
1305 Succs[SI->case_default()->getSuccessorIndex()] =
1306 Range.isSizeLargerThan(ReachableCaseCount);
1307 return;
1308 }
1309
1310 // Overdefined or unknown condition? All destinations are executable!
1311 if (!SCValue.isUnknownOrUndef())
1312 Succs.assign(TI.getNumSuccessors(), true);
1313 return;
1314 }
1315
1316 // In case of indirect branch and its address is a blockaddress, we mark
1317 // the target as executable.
1318 if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
1319 // Casts are folded by visitCastInst.
1320 const ValueLatticeElement &IBRValue = getValueState(IBR->getAddress());
// NOTE(review): line 1321 (the declaration/initialization of Addr,
// presumably `BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(`)
// was dropped by the extraction.
1322 getConstant(IBRValue, IBR->getAddress()->getType()));
1323 if (!Addr) { // Overdefined or unknown condition?
1324 // All destinations are executable!
1325 if (!IBRValue.isUnknownOrUndef())
1326 Succs.assign(TI.getNumSuccessors(), true);
1327 return;
1328 }
1329
1330 BasicBlock *T = Addr->getBasicBlock();
1331 assert(Addr->getFunction() == T->getParent() &&
1332 "Block address of a different function ?");
1333 for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
1334 // This is the target.
1335 if (IBR->getDestination(i) == T) {
1336 Succs[i] = true;
1337 return;
1338 }
1339 }
1340
1341 // If we didn't find our destination in the IBR successor list, then we
1342 // have undefined behavior. Its ok to assume no successor is executable.
1343 return;
1344 }
1345
1346 LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
1347 llvm_unreachable("SCCP: Don't know how to handle this terminator!");
1348 }
1349
1350 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic
1351 // block to the 'To' basic block is currently feasible.
// NOTE(review): the signature line (1352) is not visible in this dump —
// presumably `bool SCCPInstVisitor::isEdgeFeasible(BasicBlock *From,
// BasicBlock *To) const {`.
1353 // Check if we've called markEdgeExecutable on the edge yet. (We could
1354 // be more aggressive and try to consider edges which haven't been marked
1355 // yet, but there isn't any need.)
1356 return KnownFeasibleEdges.count(Edge(From, To));
1357 }
1358
1359// visit Implementations - Something changed in this instruction, either an
1360// operand made a transition, or the instruction is newly executable. Change
1361// the value type of I to reflect these changes if appropriate. This method
1362// makes sure to do the following actions:
1363//
1364// 1. If a phi node merges two constants in, and has conflicting value coming
1365// from different branches, or if the PHI node merges in an overdefined
1366// value, then the PHI node becomes overdefined.
1367// 2. If a phi node merges only constants in, and they all agree on value, the
1368// PHI node becomes a constant value equal to that.
1369// 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
1370// 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
1371// 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
1372// 6. If a conditional branch has a value that is constant, make the selected
1373// destination executable
1374// 7. If a conditional branch has a value that is overdefined, make all
1375// successors executable.
// Merge the lattice values of all feasible incoming edges into the phi's
// state (per struct field for struct-typed phis). Widening is bounded by
// the number of feasible incoming values, see the comment below.
1376 void SCCPInstVisitor::visitPHINode(PHINode &PN) {
1377 // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
1378 // and slow us down a lot. Just mark them overdefined.
1379 if (PN.getNumIncomingValues() > 64)
1380 return (void)markOverdefined(&PN);
1381
1382 if (isInstFullyOverDefined(PN))
1383 return;
// Collect the indices of incoming edges that are currently executable;
// only those may contribute to the merged state.
1384 SmallVector<unsigned> FeasibleIncomingIndices;
1385 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
1386 if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
1387 continue;
1388 FeasibleIncomingIndices.push_back(i);
1389 }
1390
1391 // Look at all of the executable operands of the PHI node. If any of them
1392 // are overdefined, the PHI becomes overdefined as well. If they are all
1393 // constant, and they agree with each other, the PHI becomes the identical
1394 // constant. If they are constant and don't agree, the PHI is a constant
1395 // range. If there are no executable operands, the PHI remains unknown.
1396 if (StructType *STy = dyn_cast<StructType>(PN.getType())) {
1397 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
// Work on a local copy; the final state is merged back below so that
// mergeInValue can notify users exactly once per field.
1398 ValueLatticeElement PhiState = getStructValueState(&PN, i);
1399 if (PhiState.isOverdefined())
1400 continue;
1401 for (unsigned j : FeasibleIncomingIndices) {
1402 const ValueLatticeElement &IV =
1403 getStructValueState(PN.getIncomingValue(j), i);
1404 PhiState.mergeIn(IV);
1405 if (PhiState.isOverdefined())
1406 break;
1407 }
1408 ValueLatticeElement &PhiStateRef = getStructValueState(&PN, i);
1409 mergeInValue(PhiStateRef, &PN, PhiState,
1410 ValueLatticeElement::MergeOptions().setMaxWidenSteps(
1411 FeasibleIncomingIndices.size() + 1));
1412 PhiStateRef.setNumRangeExtensions(
1413 std::max((unsigned)FeasibleIncomingIndices.size(),
1414 PhiStateRef.getNumRangeExtensions()));
1415 }
1416 } else {
1417 ValueLatticeElement PhiState = getValueState(&PN);
1418 for (unsigned i : FeasibleIncomingIndices) {
1419 const ValueLatticeElement &IV = getValueState(PN.getIncomingValue(i));
1420 PhiState.mergeIn(IV);
1421 if (PhiState.isOverdefined())
1422 break;
1423 }
1424 // We allow up to 1 range extension per active incoming value and one
1425 // additional extension. Note that we manually adjust the number of range
1426 // extensions to match the number of active incoming values. This helps to
1427 // limit multiple extensions caused by the same incoming value, if other
1428 // incoming values are equal.
1429 ValueLatticeElement &PhiStateRef = ValueState[&PN];
1430 mergeInValue(PhiStateRef, &PN, PhiState,
1431 ValueLatticeElement::MergeOptions().setMaxWidenSteps(
1432 FeasibleIncomingIndices.size() + 1));
1433 PhiStateRef.setNumRangeExtensions(
1434 std::max((unsigned)FeasibleIncomingIndices.size(),
1435 PhiStateRef.getNumRangeExtensions()));
1436 }
1437 }
1438
1439void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
1440 if (I.getNumOperands() == 0)
1441 return; // ret void
1442
1443 Function *F = I.getParent()->getParent();
1444 Value *ResultOp = I.getOperand(0);
1445
1446 // If we are tracking the return value of this function, merge it in.
1447 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
1448 auto TFRVI = TrackedRetVals.find(F);
1449 if (TFRVI != TrackedRetVals.end()) {
1450 mergeInValue(TFRVI->second, F, getValueState(ResultOp));
1451 return;
1452 }
1453 }
1454
1455 // Handle functions that return multiple values.
1456 if (!TrackedMultipleRetVals.empty()) {
1457 if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
1458 if (MRVFunctionsTracked.count(F))
1459 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1460 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
1461 getStructValueState(ResultOp, i));
1462 }
1463}
1464
1465void SCCPInstVisitor::visitTerminator(Instruction &TI) {
1466 SmallVector<bool, 16> SuccFeasible;
1467 getFeasibleSuccessors(TI, SuccFeasible);
1468
1469 BasicBlock *BB = TI.getParent();
1470
1471 // Mark all feasible successors executable.
1472 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
1473 if (SuccFeasible[i])
1474 markEdgeExecutable(BB, TI.getSuccessor(i));
1475}
1476
// Transfer function for casts: fold constant operands, propagate ranges
// through integer casts, and route same-type bitcasts carrying predicate
// info to the predicate handler.
1477 void SCCPInstVisitor::visitCastInst(CastInst &I) {
1478 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1479 // discover a concrete value later.
1480 if (ValueState[&I].isOverdefined())
1481 return;
1482
// Same-type bitcasts are used as ssa.copy-style carriers of predicate
// info; delegate those instead of treating them as real casts.
1483 if (auto *BC = dyn_cast<BitCastInst>(&I)) {
1484 if (BC->getType() == BC->getOperand(0)->getType()) {
1485 if (const PredicateBase *PI = getPredicateInfoFor(&I)) {
1486 handlePredicate(&I, I.getOperand(0), PI);
1487 return;
1488 }
1489 }
1490 }
1491
1492 const ValueLatticeElement &OpSt = getValueState(I.getOperand(0));
1493 if (OpSt.isUnknownOrUndef())
1494 return;
1495
1496 if (Constant *OpC = getConstant(OpSt, I.getOperand(0)->getType())) {
1497 // Fold the constant as we build.
1498 if (Constant *C =
1499 ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL)) {
1500 auto &LV = ValueState[&I];
1501 mergeInValue(LV, &I, ValueLatticeElement::get(C));
1502 return;
1503 }
1504 }
1505
1506 // Ignore bitcasts, as they may change the number of vector elements.
1507 if (I.getDestTy()->isIntOrIntVectorTy() &&
1508 I.getSrcTy()->isIntOrIntVectorTy() &&
1509 I.getOpcode() != Instruction::BitCast) {
1510 ConstantRange OpRange =
1511 OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);
1512 auto &LV = getValueState(&I);
1513
// Truncation keeps its no-wrap flags; every other int cast goes through
// the generic ConstantRange cast operation.
1514 Type *DestTy = I.getDestTy();
1515 ConstantRange Res = ConstantRange::getEmpty(DestTy->getScalarSizeInBits());
1516 if (auto *Trunc = dyn_cast<TruncInst>(&I))
1517 Res = OpRange.truncate(DestTy->getScalarSizeInBits(),
1518 Trunc->getNoWrapKind());
1519 else
1520 Res = OpRange.castOp(I.getOpcode(), DestTy->getScalarSizeInBits());
1521 mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
1522 } else
1523 markOverdefined(&I);
1524 }
1525
// Transfer function for `extractvalue` of an overflow intrinsic result:
// Idx 0 is the arithmetic value (computed by range arithmetic), Idx 1 is
// the overflow bit (provably false when the operands lie inside the
// guaranteed no-wrap region).
1526 void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
1527 const WithOverflowInst *WO,
1528 unsigned Idx) {
1529 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
1530 Type *Ty = LHS->getType();
1531
// The extract depends on the intrinsic's operands, not just the intrinsic
// itself, so register it as an additional user of both.
1532 addAdditionalUser(LHS, &EVI);
1533 addAdditionalUser(RHS, &EVI);
1534
1535 const ValueLatticeElement &L = getValueState(LHS);
1536 if (L.isUnknownOrUndef())
1537 return; // Wait to resolve.
1538 ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
1539
1540 const ValueLatticeElement &R = getValueState(RHS);
1541 if (R.isUnknownOrUndef())
1542 return; // Wait to resolve.
1543
1544 ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
1545 if (Idx == 0) {
1546 ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
1547 mergeInValue(ValueState[&EVI], &EVI, ValueLatticeElement::getRange(Res));
1548 } else {
1549 assert(Idx == 1 && "Index can only be 0 or 1");
1550 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
1551 WO->getBinaryOp(), RR, WO->getNoWrapKind());
1552 if (NWRegion.contains(LR))
1553 return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
1554 markOverdefined(&EVI);
1555 }
1556 }
1557
1558void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
1559 // If this returns a struct, mark all elements over defined, we don't track
1560 // structs in structs.
1561 if (EVI.getType()->isStructTy())
1562 return (void)markOverdefined(&EVI);
1563
1564 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1565 // discover a concrete value later.
1566 if (ValueState[&EVI].isOverdefined())
1567 return (void)markOverdefined(&EVI);
1568
1569 // If this is extracting from more than one level of struct, we don't know.
1570 if (EVI.getNumIndices() != 1)
1571 return (void)markOverdefined(&EVI);
1572
1573 Value *AggVal = EVI.getAggregateOperand();
1574 if (AggVal->getType()->isStructTy()) {
1575 unsigned i = *EVI.idx_begin();
1576 if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
1577 return handleExtractOfWithOverflow(EVI, WO, i);
1578 ValueLatticeElement EltVal = getStructValueState(AggVal, i);
1579 mergeInValue(ValueState[&EVI], &EVI, EltVal);
1580 } else {
1581 // Otherwise, must be extracting from an array.
1582 return (void)markOverdefined(&EVI);
1583 }
1584}
1585
// Transfer function for insertvalue into a struct: non-inserted fields
// pass through from the source aggregate, the inserted field takes the
// state of the inserted value (or overdefined for nested structs).
1586 void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
1587 auto *STy = dyn_cast<StructType>(IVI.getType());
1588 if (!STy)
1589 return (void)markOverdefined(&IVI);
1590
1591 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1592 // discover a concrete value later.
1593 if (ValueState[&IVI].isOverdefined())
1594 return (void)markOverdefined(&IVI);
1595
1596 // If this has more than one index, we can't handle it, drive all results to
1597 // undef.
1598 if (IVI.getNumIndices() != 1)
1599 return (void)markOverdefined(&IVI);
1600
1601 Value *Aggr = IVI.getAggregateOperand();
1602 unsigned Idx = *IVI.idx_begin();
1603
1604 // Compute the result based on what we're inserting.
1605 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1606 // This passes through all values that aren't the inserted element.
1607 if (i != Idx) {
1608 ValueLatticeElement EltVal = getStructValueState(Aggr, i);
1609 mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
1610 continue;
1611 }
1612
1613 Value *Val = IVI.getInsertedValueOperand();
1614 if (Val->getType()->isStructTy())
1615 // We don't track structs in structs.
1616 markOverdefined(getStructValueState(&IVI, i), &IVI);
1617 else {
1618 ValueLatticeElement InVal = getValueState(Val);
1619 mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
1620 }
1621 }
1622 }
1623
// Transfer function for select: a known-constant condition forwards the
// chosen operand's state; otherwise the result is the merge of both arms.
1624 void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
1625 // If this select returns a struct, just mark the result overdefined.
1626 // TODO: We could do a lot better than this if code actually uses this.
1627 if (I.getType()->isStructTy())
1628 return (void)markOverdefined(&I);
1629
1630 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1631 // discover a concrete value later.
1632 if (ValueState[&I].isOverdefined())
1633 return (void)markOverdefined(&I);
1634
1635 const ValueLatticeElement &CondValue = getValueState(I.getCondition());
1636 if (CondValue.isUnknownOrUndef())
1637 return;
1638
1639 if (ConstantInt *CondCB =
1640 getConstantInt(CondValue, I.getCondition()->getType())) {
1641 Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
1642 const ValueLatticeElement &OpValState = getValueState(OpVal);
1643 // Safety: ValueState[&I] doesn't invalidate OpValState since it is already
1644 // in the map.
1645 assert(ValueState.contains(&I) && "&I is not in ValueState map.");
1646 mergeInValue(ValueState[&I], &I, OpValState);
1647 return;
1648 }
1649
1650 // Otherwise, the condition is overdefined or a constant we can't evaluate.
1651 // See if we can produce something better than overdefined based on the T/F
1652 // value.
// Both arm states are copied by value: getValueState may insert into the
// map and would invalidate references.
1653 ValueLatticeElement TVal = getValueState(I.getTrueValue());
1654 ValueLatticeElement FVal = getValueState(I.getFalseValue());
1655
1656 ValueLatticeElement &State = ValueState[&I];
1657 bool Changed = State.mergeIn(TVal);
1658 Changed |= State.mergeIn(FVal);
1659 if (Changed)
1660 pushUsersToWorkListMsg(State, &I);
1661 }
1662
1663// Handle Unary Operators.
1664void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
1665 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1666
1667 ValueLatticeElement &IV = ValueState[&I];
1668 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1669 // discover a concrete value later.
1670 if (IV.isOverdefined())
1671 return (void)markOverdefined(&I);
1672
1673 // If something is unknown/undef, wait for it to resolve.
1674 if (V0State.isUnknownOrUndef())
1675 return;
1676
1677 if (SCCPSolver::isConstant(V0State))
1678 if (Constant *C = ConstantFoldUnaryOpOperand(
1679 I.getOpcode(), getConstant(V0State, I.getType()), DL))
1680 return (void)markConstant(IV, &I, C);
1681
1682 markOverdefined(&I);
1683}
1684
1685void SCCPInstVisitor::visitFreezeInst(FreezeInst &I) {
1686 // If this freeze returns a struct, just mark the result overdefined.
1687 // TODO: We could do a lot better than this.
1688 if (I.getType()->isStructTy())
1689 return (void)markOverdefined(&I);
1690
1691 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1692 ValueLatticeElement &IV = ValueState[&I];
1693 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1694 // discover a concrete value later.
1695 if (IV.isOverdefined())
1696 return (void)markOverdefined(&I);
1697
1698 // If something is unknown/undef, wait for it to resolve.
1699 if (V0State.isUnknownOrUndef())
1700 return;
1701
1702 if (SCCPSolver::isConstant(V0State) &&
1703 isGuaranteedNotToBeUndefOrPoison(getConstant(V0State, I.getType())))
1704 return (void)markConstant(IV, &I, getConstant(V0State, I.getType()));
1705
1706 markOverdefined(&I);
1707}
1708
1709// Handle Binary Operators.
// Transfer function for binary operators: try constant folding when at
// least one operand is constant, then fall back to ConstantRange
// arithmetic for integer types; otherwise overdefined.
1710 void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
1711 ValueLatticeElement V1State = getValueState(I.getOperand(0));
1712 ValueLatticeElement V2State = getValueState(I.getOperand(1));
1713
1714 ValueLatticeElement &IV = ValueState[&I];
1715 if (IV.isOverdefined())
1716 return;
1717
1718 // If something is undef, wait for it to resolve.
1719 if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
1720 return;
1721
1722 if (V1State.isOverdefined() && V2State.isOverdefined())
1723 return (void)markOverdefined(&I);
1724
1725 // If either of the operands is a constant, try to fold it to a constant.
1726 // TODO: Use information from notconstant better.
1727 if ((V1State.isConstant() || V2State.isConstant())) {
1728 Value *V1 = SCCPSolver::isConstant(V1State)
1729 ? getConstant(V1State, I.getOperand(0)->getType())
1730 : I.getOperand(0);
1731 Value *V2 = SCCPSolver::isConstant(V2State)
1732 ? getConstant(V2State, I.getOperand(1)->getType())
1733 : I.getOperand(1);
1734 Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL, &I));
1735 auto *C = dyn_cast_or_null<Constant>(R);
1736 if (C) {
1737 // Conservatively assume that the result may be based on operands that may
1738 // be undef. Note that we use mergeInValue to combine the constant with
1739 // the existing lattice value for I, as different constants might be found
1740 // after one of the operands go to overdefined, e.g. due to one operand
1741 // being a special floating value.
1742 ValueLatticeElement NewV;
1743 NewV.markConstant(C, /*MayIncludeUndef=*/true);
1744 return (void)mergeInValue(ValueState[&I], &I, NewV);
1745 }
1746 }
1747
1748 // Only use ranges for binary operators on integers.
1749 if (!I.getType()->isIntOrIntVectorTy())
1750 return markOverdefined(&I);
1751
1752 // Try to simplify to a constant range.
1753 ConstantRange A =
1754 V1State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
1755 ConstantRange B =
1756 V2State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
1757
// No-wrap flags sharpen the resulting range when present.
1758 auto *BO = cast<BinaryOperator>(&I);
1759 ConstantRange R = ConstantRange::getEmpty(I.getType()->getScalarSizeInBits());
1760 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO))
1761 R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
1762 else
1763 R = A.binaryOp(BO->getOpcode(), B);
1764 mergeInValue(ValueState[&I], &I, ValueLatticeElement::getRange(R));
1765
1766 // TODO: Currently we do not exploit special values that produce something
1767 // better than overdefined with an overdefined operand for vector or floating
1768 // point types, like and <4 x i32> overdefined, zeroinitializer.
1769 }
1770
1771// Handle ICmpInst instruction.
// Transfer function for integer/FP comparisons: evaluate the predicate on
// the operands' lattice values; unresolved operands wait, everything else
// collapses to overdefined.
1772 void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
1773 // Do not cache this lookup, getValueState calls later in the function might
1774 // invalidate the reference.
1775 if (ValueState[&I].isOverdefined())
1776 return (void)markOverdefined(&I);
1777
1778 Value *Op1 = I.getOperand(0);
1779 Value *Op2 = I.getOperand(1);
1780
1781 // For parameters, use ParamState which includes constant range info if
1782 // available.
// Copies by value on purpose — see the invalidation note above.
1783 auto V1State = getValueState(Op1);
1784 auto V2State = getValueState(Op2);
1785
1786 Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
1787 if (C) {
1788 ValueLatticeElement CV;
1789 CV.markConstant(C);
1790 mergeInValue(ValueState[&I], &I, CV);
1791 return;
1792 }
1793
1794 // If operands are still unknown, wait for it to resolve.
1795 if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
1796 !SCCPSolver::isConstant(ValueState[&I]))
1797 return;
1798
1799 markOverdefined(&I);
1800 }
1801
1802// Handle getelementptr instructions. If all operands are constants then we
1803// can turn this into a getelementptr ConstantExpr.
// Transfer function for GEPs: fold to a constant when every operand is
// constant; propagate non-nullness for inbounds/nuw GEPs of a known
// non-null base; otherwise overdefined.
1804 void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
1805 if (ValueState[&I].isOverdefined())
1806 return (void)markOverdefined(&I);
1807
1808 const ValueLatticeElement &PtrState = getValueState(I.getPointerOperand());
1809 if (PtrState.isUnknownOrUndef())
1810 return;
1811
1812 // gep inbounds/nuw of non-null is non-null.
1813 if (PtrState.isNotConstant() && PtrState.getNotConstant()->isNullValue()) {
1814 if (I.hasNoUnsignedWrap() ||
1815 (I.isInBounds() &&
1816 !NullPointerIsDefined(I.getFunction(), I.getAddressSpace())))
1817 return (void)markNotNull(ValueState[&I], &I);
1818 return (void)markOverdefined(&I);
1819 }
1820
// NOTE(review): line 1821 (the declaration of Operands, presumably
// `SmallVector<Constant *, 4> Operands;`) was dropped by the extraction.
1822 Operands.reserve(I.getNumOperands());
1823
1824 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
1825 const ValueLatticeElement &State = getValueState(I.getOperand(i));
1826 if (State.isUnknownOrUndef())
1827 return; // Operands are not resolved yet.
1828
1829 if (Constant *C = getConstant(State, I.getOperand(i)->getType())) {
1830 Operands.push_back(C);
1831 continue;
1832 }
1833
1834 return (void)markOverdefined(&I);
1835 }
1836
1837 if (Constant *C = ConstantFoldInstOperands(&I, Operands, DL))
1838 markConstant(&I, C);
1839 else
1840 markOverdefined(&I);
1841 }
1842
1843void SCCPInstVisitor::visitAllocaInst(AllocaInst &I) {
1844 if (!NullPointerIsDefined(I.getFunction(), I.getAddressSpace()))
1845 return (void)markNotNull(ValueState[&I], &I);
1846
1847 markOverdefined(&I);
1848}
1849
1850void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
1851 // If this store is of a struct, ignore it.
1852 if (SI.getOperand(0)->getType()->isStructTy())
1853 return;
1854
1855 if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1856 return;
1857
1858 GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1859 auto I = TrackedGlobals.find(GV);
1860 if (I == TrackedGlobals.end())
1861 return;
1862
1863 // Get the value we are storing into the global, then merge it.
1864 mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
1865 ValueLatticeElement::MergeOptions().setCheckWiden(false));
1866 if (I->second.isOverdefined())
1867 TrackedGlobals.erase(I); // No need to keep tracking this!
1868}
1869
// Derive a lattice value for I from its attached metadata/attributes:
// `range`/`nonnull` return attributes on calls, and !range / !nonnull
// metadata on other instructions.
// NOTE(review): the signature line (1870) and the return statements for
// each case (lines 1874, 1876-1877, 1882-1883, 1885-1886, 1888) were
// dropped by the extraction; consult the upstream file.
1871 if (const auto *CB = dyn_cast<CallBase>(I)) {
1872 if (CB->getType()->isIntOrIntVectorTy())
1873 if (std::optional<ConstantRange> Range = CB->getRange())
1875 if (CB->getType()->isPointerTy() && CB->isReturnNonNull())
1878 }
1879
1880 if (I->getType()->isIntOrIntVectorTy())
1881 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
1884 if (I->hasMetadata(LLVMContext::MD_nonnull))
1887
1889 }
1890
1891// Handle load instructions. If the operand is a constant pointer to a constant
1892// global, we can replace the load with the loaded constant value!
// Transfer function for loads: fold loads from constant pointers (tracked
// globals, constant initializers), treat loads of null specially, and
// otherwise fall back to range/nonnull metadata.
1893 void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
1894 // If this load is of a struct or the load is volatile, just mark the result
1895 // as overdefined.
1896 if (I.getType()->isStructTy() || I.isVolatile())
1897 return (void)markOverdefined(&I);
1898
1899 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1900 // discover a concrete value later.
1901 if (ValueState[&I].isOverdefined())
1902 return (void)markOverdefined(&I);
1903
1904 const ValueLatticeElement &PtrVal = getValueState(I.getOperand(0));
1905 if (PtrVal.isUnknownOrUndef())
1906 return; // The pointer is not resolved yet!
1907
1908 if (SCCPSolver::isConstant(PtrVal)) {
1909 Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
1910 ValueLatticeElement &IV = ValueState[&I];
1911
1912 // load null is undefined.
// Where null dereference is UB the load stays unknown (unreachable code
// will be cleaned up elsewhere); where null is a defined address it is
// conservatively overdefined.
1913 if (isa<ConstantPointerNull>(Ptr)) {
1914 if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
1915 return (void)markOverdefined(IV, &I);
1916 else
1917 return;
1918 }
1919
1920 // Transform load (constant global) into the value loaded.
1921 if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
1922 if (!TrackedGlobals.empty()) {
1923 // If we are tracking this global, merge in the known value for it.
1924 auto It = TrackedGlobals.find(GV);
1925 if (It != TrackedGlobals.end()) {
1926 mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
1927 return;
1928 }
1929 }
1930 }
1931
1932 // Transform load from a constant into a constant if possible.
1933 if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
1934 return (void)markConstant(IV, &I, C);
1935 }
1936
1937 // Fall back to metadata.
1938 mergeInValue(ValueState[&I], &I, getValueFromMetadata(&I));
1939 }
1940
void SCCPInstVisitor::visitCallBase(CallBase &CB) {
  // First compute the lattice state of the call's result, then propagate the
  // actual argument states into the callee's formal arguments (for callees
  // whose incoming arguments we track).
  handleCallResult(CB);
  handleCallArguments(CB);
}
1945
1946void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
1948
1949 // Void return and not tracking callee, just bail.
1950 if (CB.getType()->isVoidTy())
1951 return;
1952
1953 // Always mark struct return as overdefined.
1954 if (CB.getType()->isStructTy())
1955 return (void)markOverdefined(&CB);
1956
1957 // Otherwise, if we have a single return value case, and if the function is
1958 // a declaration, maybe we can constant fold it.
1959 if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
1961 for (const Use &A : CB.args()) {
1962 if (A.get()->getType()->isStructTy())
1963 return markOverdefined(&CB); // Can't handle struct args.
1964 if (A.get()->getType()->isMetadataTy())
1965 continue; // Carried in CB, not allowed in Operands.
1966 const ValueLatticeElement &State = getValueState(A);
1967
1968 if (State.isUnknownOrUndef())
1969 return; // Operands are not resolved yet.
1970 if (SCCPSolver::isOverdefined(State))
1971 return (void)markOverdefined(&CB);
1972 assert(SCCPSolver::isConstant(State) && "Unknown state!");
1973 Operands.push_back(getConstant(State, A->getType()));
1974 }
1975
1976 if (SCCPSolver::isOverdefined(getValueState(&CB)))
1977 return (void)markOverdefined(&CB);
1978
1979 // If we can constant fold this, mark the result of the call as a
1980 // constant.
1981 if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
1982 return (void)markConstant(&CB, C);
1983 }
1984
1985 // Fall back to metadata.
1986 mergeInValue(ValueState[&CB], &CB, getValueFromMetadata(&CB));
1987}
1988
1989void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
1991 // If this is a local function that doesn't have its address taken, mark its
1992 // entry block executable and merge in the actual arguments to the call into
1993 // the formal arguments of the function.
1994 if (TrackingIncomingArguments.count(F)) {
1995 markBlockExecutable(&F->front());
1996
1997 // Propagate information from this call site into the callee.
1998 auto CAI = CB.arg_begin();
1999 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2000 ++AI, ++CAI) {
2001 // If this argument is byval, and if the function is not readonly, there
2002 // will be an implicit copy formed of the input aggregate.
2003 if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
2004 markOverdefined(&*AI);
2005 continue;
2006 }
2007
2008 if (auto *STy = dyn_cast<StructType>(AI->getType())) {
2009 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2010 ValueLatticeElement CallArg = getStructValueState(*CAI, i);
2011 mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
2013 }
2014 } else {
2015 ValueLatticeElement CallArg =
2016 getValueState(*CAI).intersect(getArgAttributeVL(&*AI));
2017 mergeInValue(ValueState[&*AI], &*AI, CallArg, getMaxWidenStepsOpts());
2018 }
2019 }
2020 }
2021}
2022
2023void SCCPInstVisitor::handlePredicate(Instruction *I, Value *CopyOf,
2024 const PredicateBase *PI) {
2025 ValueLatticeElement CopyOfVal = getValueState(CopyOf);
2026 const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
2027 if (!Constraint) {
2028 mergeInValue(ValueState[I], I, CopyOfVal);
2029 return;
2030 }
2031
2032 CmpInst::Predicate Pred = Constraint->Predicate;
2033 Value *OtherOp = Constraint->OtherOp;
2034
2035 // Wait until OtherOp is resolved.
2036 if (getValueState(OtherOp).isUnknown()) {
2037 addAdditionalUser(OtherOp, I);
2038 return;
2039 }
2040
2041 ValueLatticeElement CondVal = getValueState(OtherOp);
2042 ValueLatticeElement &IV = ValueState[I];
2043 if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
2044 auto ImposedCR =
2045 ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
2046
2047 // Get the range imposed by the condition.
2048 if (CondVal.isConstantRange())
2050 Pred, CondVal.getConstantRange());
2051
2052 // Combine range info for the original value with the new range from the
2053 // condition.
2054 auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
2055 /*UndefAllowed=*/true);
2056 // Treat an unresolved input like a full range.
2057 if (CopyOfCR.isEmptySet())
2058 CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
2059 auto NewCR = ImposedCR.intersectWith(CopyOfCR);
2060 // If the existing information is != x, do not use the information from
2061 // a chained predicate, as the != x information is more likely to be
2062 // helpful in practice.
2063 if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
2064 NewCR = std::move(CopyOfCR);
2065
2066 // The new range is based on a branch condition. That guarantees that
2067 // neither of the compare operands can be undef in the branch targets,
2068 // unless we have conditions that are always true/false (e.g. icmp ule
2069 // i32, %a, i32_max). For the latter overdefined/empty range will be
2070 // inferred, but the branch will get folded accordingly anyways.
2071 addAdditionalUser(OtherOp, I);
2072 mergeInValue(
2073 IV, I, ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
2074 return;
2075 } else if (Pred == CmpInst::ICMP_EQ &&
2076 (CondVal.isConstant() || CondVal.isNotConstant())) {
2077 // For non-integer values or integer constant expressions, only
2078 // propagate equal constants or not-constants.
2079 addAdditionalUser(OtherOp, I);
2080 mergeInValue(IV, I, CondVal);
2081 return;
2082 } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
2083 // Propagate inequalities.
2084 addAdditionalUser(OtherOp, I);
2085 mergeInValue(IV, I, ValueLatticeElement::getNot(CondVal.getConstant()));
2086 return;
2087 }
2088
2089 return (void)mergeInValue(IV, I, CopyOfVal);
2090}
2091
2092void SCCPInstVisitor::handleCallResult(CallBase &CB) {
2094
2095 if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
2096 if (II->getIntrinsicID() == Intrinsic::vscale) {
2097 unsigned BitWidth = CB.getType()->getScalarSizeInBits();
2098 const ConstantRange Result = getVScaleRange(II->getFunction(), BitWidth);
2099 return (void)mergeInValue(ValueState[II], II,
2101 }
2102 if (II->getIntrinsicID() == Intrinsic::experimental_get_vector_length) {
2103 Value *CountArg = II->getArgOperand(0);
2104 Value *VF = II->getArgOperand(1);
2105 bool Scalable = cast<ConstantInt>(II->getArgOperand(2))->isOne();
2106
2107 // Computation happens in the larger type.
2108 unsigned BitWidth = std::max(CountArg->getType()->getScalarSizeInBits(),
2109 VF->getType()->getScalarSizeInBits());
2110
2111 ConstantRange Count = getValueState(CountArg)
2112 .asConstantRange(CountArg->getType(), false)
2113 .zeroExtend(BitWidth);
2114 ConstantRange MaxLanes = getValueState(VF)
2115 .asConstantRange(VF->getType(), false)
2116 .zeroExtend(BitWidth);
2117 if (Scalable)
2118 MaxLanes =
2119 MaxLanes.multiply(getVScaleRange(II->getFunction(), BitWidth));
2120
2121 // The result is always less than both Count and MaxLanes.
2122 ConstantRange Result = ConstantRange::getNonEmpty(
2124 APIntOps::umin(Count.getUnsignedMax(), MaxLanes.getUnsignedMax()) +
2125 1);
2126
2127 // If Count <= MaxLanes, getvectorlength(Count, MaxLanes) = Count
2128 if (Count.icmp(CmpInst::ICMP_ULE, MaxLanes))
2129 Result = std::move(Count);
2130
2131 Result = Result.truncate(II->getType()->getScalarSizeInBits());
2132 return (void)mergeInValue(ValueState[II], II,
2134 }
2135
2136 if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
2137 // Compute result range for intrinsics supported by ConstantRange.
2138 // Do this even if we don't know a range for all operands, as we may
2139 // still know something about the result range, e.g. of abs(x).
2141 for (Value *Op : II->args()) {
2142 const ValueLatticeElement &State = getValueState(Op);
2143 if (State.isUnknownOrUndef())
2144 return;
2145 OpRanges.push_back(
2146 State.asConstantRange(Op->getType(), /*UndefAllowed=*/false));
2147 }
2148
2149 ConstantRange Result =
2150 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
2151 return (void)mergeInValue(ValueState[II], II,
2153 }
2154 }
2155
2156 // The common case is that we aren't tracking the callee, either because we
2157 // are not doing interprocedural analysis or the callee is indirect, or is
2158 // external. Handle these cases first.
2159 if (!F || F->isDeclaration())
2160 return handleCallOverdefined(CB);
2161
2162 // If this is a single/zero retval case, see if we're tracking the function.
2163 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
2164 if (!MRVFunctionsTracked.count(F))
2165 return handleCallOverdefined(CB); // Not tracking this callee.
2166
2167 // If we are tracking this callee, propagate the result of the function
2168 // into this call site.
2169 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2170 mergeInValue(getStructValueState(&CB, i), &CB,
2171 TrackedMultipleRetVals[std::make_pair(F, i)],
2173 } else {
2174 auto TFRVI = TrackedRetVals.find(F);
2175 if (TFRVI == TrackedRetVals.end())
2176 return handleCallOverdefined(CB); // Not tracking this callee.
2177
2178 // If so, propagate the return value of the callee into this call result.
2179 mergeInValue(ValueState[&CB], &CB, TFRVI->second, getMaxWidenStepsOpts());
2180 }
2181}
2182
2183bool SCCPInstVisitor::isInstFullyOverDefined(Instruction &Inst) {
2184 // For structure Type, we handle each member separately.
2185 // A structure object won't be considered as overdefined when
2186 // there is at least one member that is not overdefined.
2187 if (StructType *STy = dyn_cast<StructType>(Inst.getType())) {
2188 for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i) {
2189 if (!getStructValueState(&Inst, i).isOverdefined())
2190 return false;
2191 }
2192 return true;
2193 }
2194
2195 return getValueState(&Inst).isOverdefined();
2196}
2197
2199 // Process the work lists until they are empty!
2200 while (!BBWorkList.empty() || !InstWorkList.empty()) {
2201 // Process the instruction work list.
2202 while (!InstWorkList.empty()) {
2203 Instruction *I = InstWorkList.pop_back_val();
2204 Invalidated.erase(I);
2205
2206 LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
2207
2208 visit(I);
2209 }
2210
2211 // Process the basic block work list.
2212 while (!BBWorkList.empty()) {
2213 BasicBlock *BB = BBWorkList.pop_back_val();
2214 BBVisited.insert(BB);
2215
2216 LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
2217 for (Instruction &I : *BB) {
2218 CurI = &I;
2219 visit(I);
2220 }
2221 CurI = nullptr;
2222 }
2223 }
2224}
2225
2227 // Look for instructions which produce undef values.
2228 if (I.getType()->isVoidTy())
2229 return false;
2230
2231 if (auto *STy = dyn_cast<StructType>(I.getType())) {
2232 // Only a few things that can be structs matter for undef.
2233
2234 // Tracked calls must never be marked overdefined in resolvedUndefsIn.
2235 if (auto *CB = dyn_cast<CallBase>(&I))
2236 if (Function *F = CB->getCalledFunction())
2237 if (MRVFunctionsTracked.count(F))
2238 return false;
2239
2240 // extractvalue and insertvalue don't need to be marked; they are
2241 // tracked as precisely as their operands.
2243 return false;
2244 // Send the results of everything else to overdefined. We could be
2245 // more precise than this but it isn't worth bothering.
2246 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2247 ValueLatticeElement &LV = getStructValueState(&I, i);
2248 if (LV.isUnknown()) {
2249 markOverdefined(LV, &I);
2250 return true;
2251 }
2252 }
2253 return false;
2254 }
2255
2256 ValueLatticeElement &LV = getValueState(&I);
2257 if (!LV.isUnknown())
2258 return false;
2259
2260 // There are two reasons a call can have an undef result
2261 // 1. It could be tracked.
2262 // 2. It could be constant-foldable.
2263 // Because of the way we solve return values, tracked calls must
2264 // never be marked overdefined in resolvedUndefsIn.
2265 if (auto *CB = dyn_cast<CallBase>(&I))
2266 if (Function *F = CB->getCalledFunction())
2267 if (TrackedRetVals.count(F))
2268 return false;
2269
2270 if (isa<LoadInst>(I)) {
2271 // A load here means one of two things: a load of undef from a global,
2272 // a load from an unknown pointer. Either way, having it return undef
2273 // is okay.
2274 return false;
2275 }
2276
2277 markOverdefined(&I);
2278 return true;
2279}
2280
2281/// While solving the dataflow for a function, we don't compute a result for
2282/// operations with an undef operand, to allow undef to be lowered to a
2283/// constant later. For example, constant folding of "zext i8 undef to i16"
2284/// would result in "i16 0", and if undef is later lowered to "i8 1", then the
2285/// zext result would become "i16 1" and would result into an overdefined
2286/// lattice value once merged with the previous result. Not computing the
2287/// result of the zext (treating undef the same as unknown) allows us to handle
2288/// a later undef->constant lowering more optimally.
2289///
2290/// However, if the operand remains undef when the solver returns, we do need
2291/// to assign some result to the instruction (otherwise we would treat it as
2292/// unreachable). For simplicity, we mark any instructions that are still
2293/// unknown as overdefined.
2295 bool MadeChange = false;
2296 for (BasicBlock &BB : F) {
2297 if (!BBExecutable.count(&BB))
2298 continue;
2299
2300 for (Instruction &I : BB)
2301 MadeChange |= resolvedUndef(I);
2302 }
2303
2304 LLVM_DEBUG(if (MadeChange) dbgs()
2305 << "\nResolved undefs in " << F.getName() << '\n');
2306
2307 return MadeChange;
2308}
2309
2310//===----------------------------------------------------------------------===//
2311//
2312// SCCPSolver implementations
2313//
2315 const DataLayout &DL,
2316 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
2317 LLVMContext &Ctx)
2318 : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}
2319
// Defaulted out-of-line rather than in the header — presumably because the
// header only forward-declares SCCPInstVisitor (pimpl), so the destructor of
// the owning pointer must be emitted where the full type is visible.
// NOTE(review): confirm against SCCPSolver.h.
SCCPSolver::~SCCPSolver() = default;
2321
2323 AssumptionCache &AC) {
2324 Visitor->addPredicateInfo(F, DT, AC);
2325}
2326
2328 Visitor->removeSSACopies(F);
2329}
2330
2332 return Visitor->markBlockExecutable(BB);
2333}
2334
2336 return Visitor->getPredicateInfoFor(I);
2337}
2338
2340 Visitor->trackValueOfGlobalVariable(GV);
2341}
2342
2344 Visitor->addTrackedFunction(F);
2345}
2346
2348 Visitor->addToMustPreserveReturnsInFunctions(F);
2349}
2350
2352 return Visitor->mustPreserveReturn(F);
2353}
2354
2356 Visitor->addArgumentTrackedFunction(F);
2357}
2358
2360 return Visitor->isArgumentTrackedFunction(F);
2361}
2362
2365 return Visitor->getArgumentTrackedFunctions();
2366}
2367
2368void SCCPSolver::solve() { Visitor->solve(); }
2369
2371 return Visitor->resolvedUndefsIn(F);
2372}
2373
2375 Visitor->solveWhileResolvedUndefsIn(M);
2376}
2377
2378void
2380 Visitor->solveWhileResolvedUndefsIn(WorkList);
2381}
2382
2384 Visitor->solveWhileResolvedUndefs();
2385}
2386
2388 return Visitor->isBlockExecutable(BB);
2389}
2390
2392 return Visitor->isEdgeFeasible(From, To);
2393}
2394
2395std::vector<ValueLatticeElement>
2397 return Visitor->getStructLatticeValueFor(V);
2398}
2399
2401 return Visitor->removeLatticeValueFor(V);
2402}
2403
2405 Visitor->resetLatticeValueFor(Call);
2406}
2407
2409 return Visitor->getLatticeValueFor(V);
2410}
2411
2414 return Visitor->getTrackedRetVals();
2415}
2416
2419 return Visitor->getTrackedGlobals();
2420}
2421
2423 return Visitor->getMRVFunctionsTracked();
2424}
2425
2426void SCCPSolver::markOverdefined(Value *V) { Visitor->markOverdefined(V); }
2427
2429 Visitor->trackValueOfArgument(V);
2430}
2431
2433 return Visitor->isStructLatticeConstant(F, STy);
2434}
2435
2437 Type *Ty) const {
2438 return Visitor->getConstant(LV, Ty);
2439}
2440
2442 return Visitor->getConstantOrNull(V);
2443}
2444
2446 const SmallVectorImpl<ArgInfo> &Args) {
2447 Visitor->setLatticeValueForSpecializationArguments(F, Args);
2448}
2449
2451 Visitor->markFunctionUnreachable(F);
2452}
2453
2454void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }
2455
2456void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Hexagon Common GEP
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts()
Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
static const unsigned MaxNumRangeExtensions
static ValueLatticeElement getValueFromMetadata(const Instruction *I)
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
static ConstantInt * getConstantInt(Value *V, const DataLayout &DL)
Extract ConstantInt from value, looking through IntToPtr and PointerNullValue.
#define LLVM_DEBUG(...)
Definition Debug.h:119
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
Class for arbitrary precision integers.
Definition APInt.h:78
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1662
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Function * getFunction() const
Definition Constants.h:1107
BasicBlock * getBasicBlock() const
Definition Constants.h:1106
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange castOp(Instruction::CastOps CastOp, uint32_t BitWidth) const
Return a new range representing the possible values resulting from an application of the specified ca...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
static LLVM_ABI ConstantRange intrinsic(Intrinsic::ID IntrinsicID, ArrayRef< ConstantRange > Ops)
Compute range of intrinsic result for the given operand ranges.
LLVM_ABI bool isSizeLargerThan(uint64_t MaxSize) const
Compare set size of this range with Value.
static LLVM_ABI bool isIntrinsicSupported(Intrinsic::ID IntrinsicID)
Returns true if ConstantRange calculations are supported for intrinsic with IntrinsicID.
bool isSingleElement() const
Return true if this set contains exactly one member.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI ConstantRange binaryOp(Instruction::BinaryOps BinOp, const ConstantRange &Other) const
Return a new range representing the possible values resulting from an application of the specified bi...
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static DebugLoc getTemporary()
Definition DebugLoc.h:160
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a struct member or array element value from an aggregate value.
unsigned getNumIndices() const
idx_iterator idx_begin() const
This class represents a freeze function that returns random concrete value if an operand is either a ...
Argument * arg_iterator
Definition Function.h:73
static GEPNoWrapFlags noUnsignedWrap()
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2858
This instruction inserts a struct field of array element value into an aggregate value.
Value * getInsertedValueOperand()
unsigned getNumIndices() const
idx_iterator idx_begin() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
LLVM_ABI void setNonNeg(bool b=true)
Set or clear the nneg flag on this instruction, which must be a zext instruction.
LLVM_ABI bool hasNonNeg() const LLVM_READONLY
Determine whether the the nneg flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isSpecialTerminator() const
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:38
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
LLVM_ABI std::optional< PredicateConstraint > getConstraint() const
Fetch condition in the form of PredicateConstraint, if possible.
Return a value (possibly void), from a function.
Helper class for SCCPSolver.
const MapVector< Function *, ValueLatticeElement > & getTrackedRetVals() const
const PredicateBase * getPredicateInfoFor(Instruction *I)
std::vector< ValueLatticeElement > getStructLatticeValueFor(Value *V) const
bool resolvedUndef(Instruction &I)
void markFunctionUnreachable(Function *F)
bool markBlockExecutable(BasicBlock *BB)
bool resolvedUndefsIn(Function &F)
While solving the dataflow for a function, we don't compute a result for operations with an undef ope...
Constant * getConstant(const ValueLatticeElement &LV, Type *Ty) const
SCCPInstVisitor(const DataLayout &DL, std::function< const TargetLibraryInfo &(Function &)> GetTLI, LLVMContext &Ctx)
const DenseMap< GlobalVariable *, ValueLatticeElement > & getTrackedGlobals() const
const ValueLatticeElement & getLatticeValueFor(Value *V) const
void removeLatticeValueFor(Value *V)
void trackValueOfArgument(Argument *A)
void visitCallInst(CallInst &I)
void markOverdefined(Value *V)
bool isArgumentTrackedFunction(Function *F)
void addTrackedFunction(Function *F)
void solveWhileResolvedUndefsIn(Module &M)
void trackValueOfGlobalVariable(GlobalVariable *GV)
Constant * getConstantOrNull(Value *V) const
void removeSSACopies(Function &F)
const SmallPtrSet< Function *, 16 > & getMRVFunctionsTracked() const
const SmallPtrSetImpl< Function * > & getArgumentTrackedFunctions() const
void resetLatticeValueFor(CallBase *Call)
Invalidate the Lattice Value of Call and its users after specializing the call.
ValueLatticeElement getArgAttributeVL(Argument *A)
void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC)
void addToMustPreserveReturnsInFunctions(Function *F)
void addArgumentTrackedFunction(Function *F)
bool isStructLatticeConstant(Function *F, StructType *STy)
void solveWhileResolvedUndefsIn(SmallVectorImpl< Function * > &WorkList)
bool isBlockExecutable(BasicBlock *BB) const
bool mustPreserveReturn(Function *F)
void setLatticeValueForSpecializationArguments(Function *F, const SmallVectorImpl< ArgInfo > &Args)
bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const
SCCPSolver - This interface class is a general purpose solver for Sparse Conditional Constant Propaga...
Definition SCCPSolver.h:66
LLVM_ABI void visitCall(CallInst &I)
LLVM_ABI ~SCCPSolver()
LLVM_ABI void resetLatticeValueFor(CallBase *Call)
Invalidate the Lattice Value of Call and its users after specializing the call.
LLVM_ABI void trackValueOfGlobalVariable(GlobalVariable *GV)
trackValueOfGlobalVariable - Clients can use this method to inform the SCCPSolver that it should trac...
LLVM_ABI bool tryToReplaceWithConstant(Value *V)
LLVM_ABI void inferArgAttributes() const
LLVM_ABI bool isStructLatticeConstant(Function *F, StructType *STy)
LLVM_ABI void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC)
LLVM_ABI void solve()
Solve - Solve for constants and executable blocks.
LLVM_ABI void visit(Instruction *I)
LLVM_ABI void trackValueOfArgument(Argument *V)
trackValueOfArgument - Mark the specified argument overdefined unless it have range attribute.
LLVM_ABI const DenseMap< GlobalVariable *, ValueLatticeElement > & getTrackedGlobals() const
getTrackedGlobals - Get and return the set of inferred initializers for global variables.
LLVM_ABI void addTrackedFunction(Function *F)
addTrackedFunction - If the SCCP solver is supposed to track calls into and out of the specified func...
LLVM_ABI void solveWhileResolvedUndefsIn(Module &M)
LLVM_ABI const PredicateBase * getPredicateInfoFor(Instruction *I)
LLVM_ABI const SmallPtrSetImpl< Function * > & getArgumentTrackedFunctions() const
LLVM_ABI const SmallPtrSet< Function *, 16 > & getMRVFunctionsTracked() const
getMRVFunctionsTracked - Get the set of functions which return multiple values tracked by the pass.
LLVM_ABI bool resolvedUndefsIn(Function &F)
resolvedUndefsIn - While solving the dataflow for a function, we assume that branches on undef values...
LLVM_ABI void addArgumentTrackedFunction(Function *F)
LLVM_ABI void solveWhileResolvedUndefs()
LLVM_ABI void removeLatticeValueFor(Value *V)
LLVM_ABI std::vector< ValueLatticeElement > getStructLatticeValueFor(Value *V) const
LLVM_ABI Constant * getConstantOrNull(Value *V) const
Return either a Constant or nullptr for a given Value.
LLVM_ABI bool simplifyInstsInBlock(BasicBlock &BB, SmallPtrSetImpl< Value * > &InsertedValues, Statistic &InstRemovedStat, Statistic &InstReplacedStat)
LLVM_ABI Constant * getConstant(const ValueLatticeElement &LV, Type *Ty) const
Helper to return a Constant if LV is either a constant or a constant range with a single element.
LLVM_ABI const ValueLatticeElement & getLatticeValueFor(Value *V) const
LLVM_ABI void addToMustPreserveReturnsInFunctions(Function *F)
Add function to the list of functions whose return cannot be modified.
LLVM_ABI bool removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU, BasicBlock *&NewUnreachableBB) const
LLVM_ABI bool isBlockExecutable(BasicBlock *BB) const
LLVM_ABI void inferReturnAttributes() const
LLVM_ABI bool markBlockExecutable(BasicBlock *BB)
markBlockExecutable - This method can be used by clients to mark all of the blocks that are known to ...
LLVM_ABI void setLatticeValueForSpecializationArguments(Function *F, const SmallVectorImpl< ArgInfo > &Args)
Set the Lattice Value for the arguments of a specialization F.
static LLVM_ABI bool isConstant(const ValueLatticeElement &LV)
LLVM_ABI const MapVector< Function *, ValueLatticeElement > & getTrackedRetVals() const
getTrackedRetVals - Get the inferred return value map.
LLVM_ABI bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const
LLVM_ABI bool mustPreserveReturn(Function *F)
Returns true if the return of the given function cannot be modified.
static LLVM_ABI bool isOverdefined(const ValueLatticeElement &LV)
LLVM_ABI void markFunctionUnreachable(Function *F)
Mark all of the blocks in function F non-executable.
LLVM_ABI bool isArgumentTrackedFunction(Function *F)
Returns true if the given function is in the solver's set of argument-tracked functions.
LLVM_ABI SCCPSolver(const DataLayout &DL, std::function< const TargetLibraryInfo &(Function &)> GetTLI, LLVMContext &Ctx)
LLVM_ABI void markOverdefined(Value *V)
markOverdefined - Mark the specified value overdefined.
LLVM_ABI void removeSSACopies(Function &F)
This class represents the LLVM 'select' instruction.
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
iterator begin() const
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
static UncondBrInst * Create(BasicBlock *Target, InsertPosition InsertBefore=nullptr)
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
Value * getOperand(unsigned i) const
Definition User.h:207
This class represents lattice values for constants.
static ValueLatticeElement getRange(ConstantRange CR, bool MayIncludeUndef=false)
LLVM_ABI Constant * getCompare(CmpInst::Predicate Pred, Type *Ty, const ValueLatticeElement &Other, const DataLayout &DL) const
true, false or undef constants, or nullptr if the comparison cannot be evaluated.
bool isConstantRangeIncludingUndef() const
static ValueLatticeElement getNot(Constant *C)
ConstantRange asConstantRange(unsigned BW, bool UndefAllowed=false) const
void setNumRangeExtensions(unsigned N)
const ConstantRange & getConstantRange(bool UndefAllowed=true) const
Returns the constant range for this value.
bool isConstantRange(bool UndefAllowed=true) const
Returns true if this value is a constant range.
static ValueLatticeElement get(Constant *C)
unsigned getNumRangeExtensions() const
Constant * getNotConstant() const
LLVM_ABI ValueLatticeElement intersect(const ValueLatticeElement &Other) const
Combine two sets of facts about the same value into a single set of facts.
Constant * getConstant() const
bool mergeIn(const ValueLatticeElement &RHS, MergeOptions Opts=MergeOptions())
Updates this object to approximate both this object and RHS.
bool markConstant(Constant *V, bool MayIncludeUndef=false)
static ValueLatticeElement getOverdefined()
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI std::string getNameOrAsOperand() const
Definition Value.cpp:460
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
Represents an op.with.overflow intrinsic.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2287
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
auto m_Value()
Match an arbitrary value and ignore it.
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match a integer or vector negated power-of-2.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
static bool replaceSignedInst(SCCPSolver &Solver, SmallPtrSetImpl< Value * > &InsertedValues, Instruction &Inst)
Try to replace signed instructions with their unsigned equivalent.
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
NoopStatistic Statistic
Definition Statistic.h:162
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:209
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1916
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
static bool refineInstruction(SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues, Instruction &Inst)
Try to use Inst's value range from Solver to infer the NUW flag.
static void inferAttribute(Function *F, unsigned AttrIndex, const ValueLatticeElement &Val)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:874
Struct to control some aspects related to merging constant ranges.
MergeOptions & setMaxWidenSteps(unsigned Steps=1)