#define DEBUG_TYPE "guard-widening"

STATISTIC(GuardsEliminated, "Number of eliminated guards");
STATISTIC(CondBranchEliminated, "Number of eliminated conditional branches");

static cl::opt<bool>
    WidenBranchGuards("guard-widening-widen-branch-guards", cl::Hidden,
                      cl::desc("Whether or not we should widen guards "
                               "expressed as branches by widenable conditions"),
                      cl::init(true));
// getCondition(Instruction *I): for a guard intrinsic the condition is the
// first argument; for a widenable branch it is the branch condition.
assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
       "Bad guard intrinsic?");
return GI->getArgOperand(0);
// ...
return cast<BranchInst>(I)->getCondition();

// setCondition(Instruction *I, Value *NewCond): the mirror operation.
assert(GI->getIntrinsicID() == Intrinsic::experimental_guard &&
       "Bad guard intrinsic?");
GI->setArgOperand(0, NewCond);
// ...
cast<BranchInst>(I)->setCondition(NewCond);

// findInsertionPointForWideCondition: for a guard expressed as a widenable
// branch, widen at the widenable condition (WC) rather than at the branch.
Value *Condition, *WC;
// ...
return cast<Instruction>(WC);
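// --- Illustrative sketch (not part of the original file) ---------------------
// A hypothetical classifier for the two guard forms this pass handles, built
// only on the public helpers from llvm/Analysis/GuardUtils.h; the in-file
// isSupportedGuardInstruction plays this role and additionally consults the
// WidenBranchGuards flag for the branch form.
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/IR/User.h"

static bool looksLikeWidenableGuard(const llvm::User *U) {
  // Form 1: call void @llvm.experimental.guard(i1 %cond) [ "deopt"(...) ]
  if (llvm::isGuard(U))
    return true;
  // Form 2: a branch on (%cond & %wc) where %wc is
  // call i1 @llvm.experimental.widenable.condition().
  return llvm::isGuardAsWidenableBranch(U);
}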
class GuardWideningImpl {
  // ...
  bool eliminateInstrViaWidening(
      Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
      const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> &GuardsPerBlock,
      bool InvertCondition = false);

  enum WideningScore {
    WS_IllegalOrNegative,
    // ...
  };
  static StringRef scoreTypeToString(WideningScore WS);

  WideningScore computeWideningScore(Instruction *DominatedInstr,
                                     Instruction *DominatingGuard,
                                     bool InvertCond);

  bool canBeHoistedTo(const Value *V, const Instruction *InsertPos) const {
    SmallPtrSet<const Instruction *, 8> Visited;
    return canBeHoistedTo(V, InsertPos, Visited);
  }

  bool widenCondCommon(Value *Cond0, Value *Cond1, Instruction *InsertPt,
                       Value *&Result, bool InvertCondition);
  // RangeCheck represents a check of the form "Base + Offset u< Length",
  // remembered together with the ICmpInst it was parsed from.
  void setBase(const Value *NewBase) { Base = NewBase; }
  // ...
  const Value *getBase() const { return Base; }
  const APInt &getOffsetValue() const { return getOffset()->getValue(); }
  ICmpInst *getCheckInst() const { return CheckInst; }

  void print(raw_ostream &OS, bool PrintTypes = false) {
    OS << "Base: ";
    Base->printAsOperand(OS, PrintTypes);
    OS << " Offset: ";
    Offset->printAsOperand(OS, PrintTypes);
    OS << " Length: ";
    Length->printAsOperand(OS, PrintTypes);
  }
  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks) {
    SmallPtrSet<const Value *, 8> Visited;
    return parseRangeChecks(CheckCond, Checks, Visited);
  }

  // Can we compute Cond0 AND Cond1 for the price of computing only one of them?
  bool isWideningCondProfitable(Value *Cond0, Value *Cond1, bool InvertCond) {
    Value *ResultUnused;
    return widenCondCommon(Cond0, Cond1, /*InsertPt=*/nullptr, ResultUnused,
                           InvertCond);
  }

  // Widen \p ToWiden to fail if \p NewCondition is false.
  void widenGuard(Instruction *ToWiden, Value *NewCondition,
                  bool InvertCondition) {
    Value *Result;
    Instruction *InsertPt = findInsertionPointForWideCondition(ToWiden);
    widenCondCommon(getCondition(ToWiden), NewCondition, InsertPt, Result,
                    InvertCondition);
    // ...
    setCondition(ToWiden, Result);
  }
  explicit GuardWideningImpl(DominatorTree &DT, PostDominatorTree *PDT,
                             LoopInfo &LI, AssumptionCache &AC,
                             MemorySSAUpdater *MSSAU, DomTreeNode *Root,
                             std::function<bool(BasicBlock *)> BlockFilter)
      : DT(DT), PDT(PDT), LI(LI), AC(AC), MSSAU(MSSAU), Root(Root),
        BlockFilter(BlockFilter) {}

  // The entry point for this pass.
  bool run();
};
bool GuardWideningImpl::run() {
  DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> GuardsInBlock;
  bool Changed = false;
  for (auto DFI = df_begin(Root), DFE = df_end(Root); DFI != DFE; ++DFI) {
    auto *BB = (*DFI)->getBlock();
    if (!BlockFilter(BB))
      continue;

    auto &CurrentList = GuardsInBlock[BB];

    for (auto &I : *BB)
      if (isSupportedGuardInstruction(&I))
        CurrentList.push_back(cast<Instruction>(&I));

    for (auto *II : CurrentList)
      Changed |= eliminateInstrViaWidening(II, DFI, GuardsInBlock);
  }

  assert(EliminatedGuardsAndBranches.empty() || Changed);
  for (auto *I : EliminatedGuardsAndBranches)
    if (!WidenedGuards.count(I)) {
      assert(isa<ConstantInt>(getCondition(I)) && "Should be!");
      if (isSupportedGuardInstruction(I)) {
        eliminateGuard(I, MSSAU);
      } else {
        assert(isa<BranchInst>(I) &&
               "Eliminated something other than guard or branch?");
        ++CondBranchEliminated;
      }
    }

  return Changed;
}
bool GuardWideningImpl::eliminateInstrViaWidening(
    Instruction *Instr, const df_iterator<DomTreeNode *> &DFSI,
    const DenseMap<BasicBlock *, SmallVector<Instruction *, 8>> &GuardsInBlock,
    bool InvertCondition) {
  // Ignore trivial true or false conditions; later cleanup removes them.
  if (isa<ConstantInt>(getCondition(Instr)))
    return false;

  Instruction *BestSoFar = nullptr;
  auto BestScoreSoFar = WS_IllegalOrNegative;

  // In the set of dominating guards, find the one we can merge with for the
  // most profit.
  for (unsigned i = 0, e = DFSI.getPathLength(); i != e; ++i) {
    auto *CurBB = DFSI.getPath(i)->getBlock();
    if (!BlockFilter(CurBB))
      break;
    assert(GuardsInBlock.count(CurBB) && "Must have been populated by now!");
    const auto &GuardsInCurBB = GuardsInBlock.find(CurBB)->second;

    auto I = GuardsInCurBB.begin();
    auto E = Instr->getParent() == CurBB ? find(GuardsInCurBB, Instr)
                                         : GuardsInCurBB.end();

#ifndef NDEBUG
    {
      // Guards within a block are expected to appear in program order.
      unsigned Index = 0;
      for (auto &I : *CurBB) {
        if (Index == GuardsInCurBB.size())
          break;
        if (GuardsInCurBB[Index] == &I)
          Index++;
      }
      assert(Index == GuardsInCurBB.size() &&
             "Guards expected to be in order!");
    }
#endif

    for (auto *Candidate : make_range(I, E)) {
      auto Score = computeWideningScore(Instr, Candidate, InvertCondition);
      LLVM_DEBUG(dbgs() << "Score between " << *getCondition(Instr)
                        << " and " << *getCondition(Candidate) << " is "
                        << scoreTypeToString(Score) << "\n");
      if (Score > BestScoreSoFar) {
        BestScoreSoFar = Score;
        BestSoFar = Candidate;
      }
    }
  }

  if (BestScoreSoFar == WS_IllegalOrNegative) {
    LLVM_DEBUG(dbgs() << "Did not eliminate guard " << *Instr << "\n");
    return false;
  }

  assert(BestSoFar != Instr && "Should have never visited same guard!");

  LLVM_DEBUG(dbgs() << "Widening " << *Instr << " into " << *BestSoFar
                    << " with score " << scoreTypeToString(BestScoreSoFar)
                    << "\n");
  widenGuard(BestSoFar, getCondition(Instr), InvertCondition);
  auto NewGuardCondition = InvertCondition
                               ? ConstantInt::getFalse(Instr->getContext())
                               : ConstantInt::getTrue(Instr->getContext());
  setCondition(Instr, NewGuardCondition);
  EliminatedGuardsAndBranches.push_back(Instr);
  WidenedGuards.insert(BestSoFar);
  return true;
}
GuardWideningImpl::WideningScore
GuardWideningImpl::computeWideningScore(Instruction *DominatedInstr,
                                        Instruction *DominatingGuard,
                                        bool InvertCond) {
  Loop *DominatedInstrLoop = LI.getLoopFor(DominatedInstr->getParent());
  Loop *DominatingGuardLoop = LI.getLoopFor(DominatingGuard->getParent());
  bool HoistingOutOfLoop = false;

  if (DominatingGuardLoop != DominatedInstrLoop) {
    // Be conservative and don't widen into a sibling loop.
    if (DominatingGuardLoop &&
        !DominatingGuardLoop->contains(DominatedInstrLoop))
      return WS_IllegalOrNegative;

    HoistingOutOfLoop = true;
  }

  auto *WideningPoint = findInsertionPointForWideCondition(DominatingGuard);
  if (!canBeHoistedTo(getCondition(DominatedInstr), WideningPoint))
    return WS_IllegalOrNegative;
  if (!canBeHoistedTo(getCondition(DominatingGuard), WideningPoint))
    return WS_IllegalOrNegative;

  // Widening is clearly profitable if it lets us fold a check away, and even
  // more so if it also hoists work out of a loop.
  if (isWideningCondProfitable(getCondition(DominatedInstr),
                               getCondition(DominatingGuard), InvertCond))
    return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;

  if (HoistingOutOfLoop)
    return WS_Positive;

  // For a given basic block \p BB, return the successor that is guaranteed or
  // highly likely to be taken.
  auto GetLikelySuccessor = [](const BasicBlock *BB) -> const BasicBlock * {
    if (auto *UniqueSucc = BB->getUniqueSuccessor())
      return UniqueSucc;
    auto *Term = BB->getTerminator();
    Value *Cond = nullptr;
    const BasicBlock *IfTrue = nullptr, *IfFalse = nullptr;
    using namespace PatternMatch;
    if (!match(Term, m_Br(m_Value(Cond), m_BasicBlock(IfTrue),
                          m_BasicBlock(IfFalse))))
      return nullptr;
    // For constant conditions, only one successor is dynamically possible.
    if (auto *ConstCond = dyn_cast<ConstantInt>(Cond))
      return ConstCond->isAllOnesValue() ? IfTrue : IfFalse;
    // ...
    return nullptr;
  };

  // Returns true if we might be hoisting above explicit control flow into a
  // considerably hotter block.
  auto MaybeHoistingToHotterBlock = [&]() {
    const auto *DominatingBlock = DominatingGuard->getParent();
    const auto *DominatedBlock = DominatedInstr->getParent();

    // Descend as far as possible, always following the likely successor.
    assert(DT.dominates(DominatingBlock, DominatedBlock) && "No dominance");
    while (DominatedBlock != DominatingBlock) {
      auto *LikelySucc = GetLikelySuccessor(DominatingBlock);
      if (!LikelySucc)
        break;
      // ...
      DominatingBlock = LikelySucc;
    }

    // Found the guarded block: hoisting does not cross unlikely control flow.
    if (DominatedBlock == DominatingBlock)
      return false;
    // ...
    if (!DT.dominates(DominatingBlock, DominatedBlock))
      return false;
    if (!PDT)
      return true;
    return !PDT->dominates(DominatedBlock, DominatingBlock);
  };

  return MaybeHoistingToHotterBlock() ? WS_IllegalOrNegative : WS_Neutral;
}
bool GuardWideningImpl::canBeHoistedTo(
    const Value *V, const Instruction *Loc,
    SmallPtrSetImpl<const Instruction *> &Visited) const {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;
  if (!isSafeToSpeculativelyExecute(Inst, Loc, &AC, &DT) ||
      Inst->mayReadFromMemory())
    return false;
  Visited.insert(Inst);
  // Only recurse up the dominance chain from the insertion point.
  assert(!isa<PHINode>(Loc) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  return all_of(Inst->operands(),
                [&](Value *Op) { return canBeHoistedTo(Op, Loc, Visited); });
}
void GuardWideningImpl::makeAvailableAt(Value *V, Instruction *Loc) const {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc))
    return;
  assert(isSafeToSpeculativelyExecute(Inst, Loc, &AC, &DT) &&
         !Inst->mayReadFromMemory() &&
         "Should've checked with canBeHoistedTo!");
  for (Value *Op : Inst->operands())
    makeAvailableAt(Op, Loc);
  Inst->moveBefore(Loc);
  // The instruction may now execute before the guard that previously made its
  // poison-generating assumptions hold, so drop those flags.
  Inst->dropPoisonGeneratingFlags();
}
bool GuardWideningImpl::widenCondCommon(Value *Cond0, Value *Cond1,
                                        Instruction *InsertPt, Value *&Result,
                                        bool InvertCondition) {
  // Case 1: both conditions compare the same LHS against constants.  Build
  // the exact ConstantRange each compare admits and fold the pair into one
  // compare when the intersection is itself expressible as a single icmp.
  // ...
  if (InvertCondition)
    Pred1 = ICmpInst::getInversePredicate(Pred1);
  // ...
  if (std::optional<ConstantRange> Intersect = CR0.exactIntersectWith(CR1)) {
    APInt NewRHSAP;
    CmpInst::Predicate Pred;
    if (Intersect->getEquivalentICmp(Pred, NewRHSAP)) {
      if (InsertPt) {
        // ...
        assert(canBeHoistedTo(LHS, InsertPt) && "must be");
        makeAvailableAt(LHS, InsertPt);
        // ...
      }
      return true;
    }
  }

  // Case 2: both conditions parse as conjunctions of range checks that
  // combine into a smaller set of checks.
  SmallVector<GuardWideningImpl::RangeCheck, 4> Checks, CombinedChecks;
  if (!InvertCondition &&
      parseRangeChecks(Cond0, Checks) && parseRangeChecks(Cond1, Checks) &&
      combineRangeChecks(Checks, CombinedChecks)) {
    if (InsertPt) {
      Result = nullptr;
      for (auto &RC : CombinedChecks) {
        makeAvailableAt(RC.getCheckInst(), InsertPt);
        if (Result)
          Result = BinaryOperator::CreateAnd(RC.getCheckInst(), Result, "",
                                             InsertPt);
        else
          Result = RC.getCheckInst();
      }
      assert(Result && "Failed to find result value");
      Result->setName("wide.chk");
    }
    return true;
  }

  // Base case -- just logical-and the two conditions together.
  if (InsertPt) {
    makeAvailableAt(Cond0, InsertPt);
    makeAvailableAt(Cond1, InsertPt);
    // ...
    Result = BinaryOperator::CreateAnd(Cond0, Cond1, "wide.chk", InsertPt);
  }
  return false;
}
bool GuardWideningImpl::parseRangeChecks(
    Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallPtrSetImpl<const Value *> &Visited) {
  if (!Visited.insert(CheckCond).second)
    return true;

  using namespace llvm::PatternMatch;

  // A conjunction contributes the checks of both sides.
  Value *AndLHS, *AndRHS;
  if (match(CheckCond, m_And(m_Value(AndLHS), m_Value(AndRHS))))
    return parseRangeChecks(AndLHS, Checks) &&
           parseRangeChecks(AndRHS, Checks);

  // Otherwise only unsigned compares over integers count as range checks.
  auto *IC = dyn_cast<ICmpInst>(CheckCond);
  if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
      (IC->getPredicate() != ICmpInst::ICMP_ULT &&
       IC->getPredicate() != ICmpInst::ICMP_UGT))
    return false;

  // Canonicalize to "CmpLHS u< CmpRHS".
  const Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
  if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    std::swap(CmpLHS, CmpRHS);

  auto &DL = IC->getModule()->getDataLayout();

  GuardWideningImpl::RangeCheck Check(
      CmpLHS, cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType())),
      CmpRHS, IC);

  if (!isKnownNonNegative(Check.getLength(), DL))
    return false;

  // Try to fold constant offsets of the form "Base + C" into Check's offset.
  auto *BaseInst = dyn_cast<Instruction>(Check.getBase());
  assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
         "Unreachable instruction?");
  // ...
  Check.setBase(OpLHS);
  // ...
  Check.setBase(OpLHS);
  // ...
  Checks.push_back(Check);
  return true;
}
bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) const {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    // Pick all of the range checks with a specific base and length, and try
    // to merge them.
    const Value *CurrentBase = Checks.front().getBase();
    const Value *CurrentLength = Checks.front().getLength();

    SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;

    auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
      return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
    };
    copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
    erase_if(Checks, IsCurrentCheck);

    assert(CurrentChecks.size() != 0 && "We know we have at least one!");
    if (CurrentChecks.size() < 3) {
      llvm::append_range(RangeChecksOut, CurrentChecks);
      continue;
    }

    llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS,
                                  const GuardWideningImpl::RangeCheck &RHS) {
      return LHS.getOffsetValue().slt(RHS.getOffsetValue());
    });

    const ConstantInt *MinOffset = CurrentChecks.front().getOffset();
    const ConstantInt *MaxOffset = CurrentChecks.back().getOffset();

    // Bail if the spread of offsets could wrap around.
    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    const APInt &HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
    };
    if (MaxDiff.isMinValue() || !all_of(drop_begin(CurrentChecks), OffsetOK))
      return false;
    // ... (emit only the checks on the min and max offsets)
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}
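// --- Illustrative sketch (not part of the original file) ---------------------
// combineRangeChecks keeps only the checks on the minimum and maximum offsets
// when every offset for a given (Base, Length) pair sits within a non-wrapping
// window.  The helper below, offsetsCollapse, is a hypothetical distillation
// of that APInt arithmetic for exactly three offsets of equal bit width.
#include "llvm/ADT/APInt.h"

static bool offsetsCollapse(const llvm::APInt &MinOff, const llvm::APInt &MidOff,
                            const llvm::APInt &MaxOff) {
  using namespace llvm;
  unsigned BW = MaxOff.getBitWidth();
  APInt MaxDiff = MaxOff - MinOff;
  // The spread of offsets must not wrap past the signed minimum, and must be
  // nonzero, mirroring the checks in the loop above.
  if (MaxDiff.ugt(APInt::getSignedMinValue(BW)) || MaxDiff.isMinValue())
    return false;
  // The middle offset must land strictly inside the window (MinOff, MaxOff],
  // so that checking "Base + MinOff u< Length" and "Base + MaxOff u< Length"
  // is enough to cover it as well.
  return (MaxOff - MidOff).ult(MaxDiff);
}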
StringRef GuardWideningImpl::scoreTypeToString(WideningScore WS) {
  switch (WS) {
  case WS_IllegalOrNegative:
    return "IllegalOrNegative";
  // ...
  case WS_VeryPositive:
    return "VeryPositive";
  }
  llvm_unreachable("Fully covered switch above!");
}
PreservedAnalyses GuardWideningPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  // ... (LoopInfo, PostDominatorTree, AssumptionCache, cached MemorySSA)
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSAA)
    MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAA->getMSSA());
  if (!GuardWideningImpl(DT, &PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                         DT.getRootNode(), [](BasicBlock *) { return true; })
           .run())
    return PreservedAnalyses::all();
  // ...
}

PreservedAnalyses GuardWideningPass::run(Loop &L, LoopAnalysisManager &AM,
                                         LoopStandardAnalysisResults &AR,
                                         LPMUpdater &U) {
  BasicBlock *RootBB = L.getLoopPredecessor();
  if (!RootBB)
    RootBB = L.getHeader();
  auto BlockFilter = [&](BasicBlock *BB) {
    return BB == RootBB || L.contains(BB);
  };
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (AR.MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(AR.MSSA);
  if (!GuardWideningImpl(AR.DT, nullptr, AR.LI, AR.AC,
                         MSSAU ? MSSAU.get() : nullptr, AR.DT.getNode(RootBB),
                         BlockFilter)
           .run())
    return PreservedAnalyses::all();
  // ...
}
struct GuardWideningLegacyPass : public FunctionPass {
  static char ID;
  // ...
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    std::unique_ptr<MemorySSAUpdater> MSSAU;
    if (MSSAWP)
      MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAWP->getMSSA());
    return GuardWideningImpl(DT, &PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                             DT.getRootNode(),
                             [](BasicBlock *) { return true; })
        .run();
  }
};
struct LoopGuardWideningLegacyPass : public LoopPass {
  static char ID;

  LoopGuardWideningLegacyPass() : LoopPass(ID) {
    initializeLoopGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
        *L->getHeader()->getParent());
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    std::unique_ptr<MemorySSAUpdater> MSSAU;
    if (MSSAWP)
      MSSAU = std::make_unique<MemorySSAUpdater>(&MSSAWP->getMSSA());

    BasicBlock *RootBB = L->getLoopPredecessor();
    if (!RootBB)
      RootBB = L->getHeader();
    auto BlockFilter = [&](BasicBlock *BB) {
      return BB == RootBB || L->contains(BB);
    };
    return GuardWideningImpl(DT, PDT, LI, AC, MSSAU ? MSSAU.get() : nullptr,
                             DT.getNode(RootBB), BlockFilter)
        .run();
  }
  // ...
};
char GuardWideningLegacyPass::ID = 0;
char LoopGuardWideningLegacyPass::ID = 0;

FunctionPass *llvm::createGuardWideningPass() { return new GuardWideningLegacyPass(); }
Pass *llvm::createLoopGuardWideningPass() { return new LoopGuardWideningLegacyPass(); }
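// --- Illustrative sketch (not part of the original file) ---------------------
// A minimal sketch of how a client might schedule this transform with the new
// pass manager, assuming the public header llvm/Transforms/Scalar/GuardWidening.h;
// the helper name addGuardWidening and the surrounding pipeline setup are
// hypothetical and elided.
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/GuardWidening.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"

static void addGuardWidening(llvm::FunctionPassManager &FPM,
                             llvm::LoopPassManager &LPM) {
  // Function-wide widening, corresponding to GuardWideningPass::run(Function&, ...).
  FPM.addPass(llvm::GuardWideningPass());
  // Per-loop widening, corresponding to the Loop overload of run() above.
  LPM.addPass(llvm::GuardWideningPass());
}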