18#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
19#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
31#define DEBUG_TYPE "instcombine"
38class OptimizationRemarkEmitter;
39class ProfileSummaryInfo;
40class TargetLibraryInfo;
41class TargetTransformInfo;
86 bool MadeIRChange =
false;
101 :
TTI(
TTI), Builder(Builder), Worklist(Worklist),
102 MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT),
DL(
DL),
103 SQ(
DL, &TLI, &DT, &AC, nullptr,
true,
105 ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}
113 if (
auto *BitCast = dyn_cast<BitCastInst>(V))
114 if (!OneUseOnly || BitCast->hasOneUse())
115 return BitCast->getOperand(0);
139 if (isa<Instruction>(V)) {
140 if (isa<CastInst>(V) ||
match(V, m_Neg(PatternMatch::m_Value())) ||
141 match(V, m_Not(PatternMatch::m_Value())) ||
146 if (isa<Argument>(V))
148 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
158 case CmpInst::ICMP_NE:
159 case CmpInst::ICMP_ULE:
160 case CmpInst::ICMP_SLE:
161 case CmpInst::ICMP_UGE:
162 case CmpInst::ICMP_SGE:
164 case CmpInst::FCMP_ONE:
165 case CmpInst::FCMP_OLE:
166 case CmpInst::FCMP_OGE:
175 return ConstantExpr::getAdd(
C, ConstantInt::get(
C->getType(), 1));
180 return ConstantExpr::getSub(
C, ConstantInt::get(
C->getType(), 1));
183 std::optional<std::pair<
195 return match(&
SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
196 PatternMatch::m_Value())) ||
197 match(&
SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
198 PatternMatch::m_Value()));
208 Value *getFreelyInvertedImpl(
Value *V,
bool WillInvertAllUses,
215 return getFreelyInvertedImpl(V, WillInvertAllUses, Builder, DoesConsume,
222 return getFreelyInverted(V, WillInvertAllUses, Builder, Unused);
233 return getFreelyInverted(V, WillInvertAllUses,
nullptr,
234 DoesConsume) !=
nullptr;
239 return isFreeToInvert(V, WillInvertAllUses, Unused);
249 for (
Use &U : V->uses()) {
250 if (U.getUser() == IgnoredUser)
253 auto *
I = cast<Instruction>(U.getUser());
254 switch (
I->getOpcode()) {
255 case Instruction::Select:
256 if (U.getOperandNo() != 0)
258 if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(
I)))
261 case Instruction::Br:
262 assert(U.getOperandNo() == 0 &&
"Must be branching on that value.");
264 case Instruction::Xor:
266 if (!
match(
I, m_Not(PatternMatch::m_Value())))
284 bool IsRHSConstant) {
285 auto *InVTy = cast<FixedVectorType>(In->getType());
287 Type *EltTy = InVTy->getElementType();
288 auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
294 case Instruction::SRem:
295 case Instruction::URem:
296 SafeC = ConstantInt::get(EltTy, 1);
298 case Instruction::FRem:
299 SafeC = ConstantFP::get(EltTy, 1.0);
303 "Only rem opcodes have no identity constant for RHS");
307 case Instruction::Shl:
308 case Instruction::LShr:
309 case Instruction::AShr:
310 case Instruction::SDiv:
311 case Instruction::UDiv:
312 case Instruction::SRem:
313 case Instruction::URem:
314 case Instruction::Sub:
315 case Instruction::FSub:
316 case Instruction::FDiv:
317 case Instruction::FRem:
318 SafeC = Constant::getNullValue(EltTy);
325 assert(SafeC &&
"Must have safe constant for binop");
326 unsigned NumElts = InVTy->getNumElements();
328 for (
unsigned i = 0; i != NumElts; ++i) {
329 Constant *
C = In->getAggregateElement(i);
330 Out[i] = isa<UndefValue>(
C) ? SafeC :
C;
332 return ConstantVector::get(Out);
350 std::optional<Instruction *> targetInstCombineIntrinsic(
IntrinsicInst &II);
351 std::optional<Value *>
354 bool &KnownBitsComputed);
355 std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
366 assert(New && !New->getParent() &&
367 "New instruction already inserted into a basic block!");
368 New->insertBefore(Old);
375 New->setDebugLoc(Old->getDebugLoc());
376 return InsertNewInstBefore(New, Old);
388 if (
I.use_empty())
return nullptr;
395 V = PoisonValue::get(
I.getType());
398 <<
" with " << *V <<
'\n');
401 if (V->use_empty() && isa<Instruction>(V) && !V->hasName() &&
I.hasName())
404 I.replaceAllUsesWith(V);
410 Value *OldOp =
I.getOperand(OpNum);
411 I.setOperand(OpNum, V);
505 unsigned Depth = 0) = 0;
509 bool AllowMultipleUsers =
false) = 0;
511 bool isValidAddrSpaceCast(
unsigned FromAS,
unsigned ToAS)
const;
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
IRBuilder< TargetFolder > BuilderTy
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
#define LLVM_LIBRARY_VISIBILITY
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
A cache of @llvm.assume calls within a function.
InstListType::iterator iterator
Instruction iterators...
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
const DataLayout & getDataLayout() const
static bool isCanonicalPredicate(CmpInst::Predicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms.
bool isFreeToInvert(Value *V, bool WillInvertAllUses)
virtual Instruction * eraseInstFromFunction(Instruction &I)=0
Combiner aware instruction erasure.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
DominatorTree & getDominatorTree() const
virtual ~InstCombiner()=default
LoopInfo * getLoopInfo() const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
SmallDenseMap< BasicBlock *, SmallVector< BasicBlock * >, 8 > PredOrder
Order of predecessors to canonicalize phi nodes towards.
TargetLibraryInfo & getTargetLibraryInfo() const
BlockFrequencyInfo * getBlockFrequencyInfo() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, unsigned Depth=0, const Instruction *CxtI=nullptr)
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo, const APInt &DemandedMask, KnownBits &Known, unsigned Depth=0)=0
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Instruction *CxtI) const
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder, bool MinimizeSize, AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
virtual Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts, unsigned Depth=0, bool AllowMultipleUsers=false)=0
static Value * peekThroughBitcast(Value *V, bool OneUseOnly=false)
Return the source operand of a potentially bitcasted value while optionally checking if it has one use.
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V? InstCombine's freelyInvertAllUsersOf() must be kept in sync with this function.
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder)
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
ProfileSummaryInfo * getProfileSummaryInfo() const
OptimizationRemarkEmitter & getOptimizationRemarkEmitter() const
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
AssumptionCache & getAssumptionCache() const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
OptimizationRemarkEmitter & ORE
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
static Constant * AddOne(Constant *C)
Add one to a Constant.
unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification passes.
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they might get more simplified now.
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void handleUseCountDecrement(Value *V)
Should be called after decrementing the use-count on V.
A wrapper class for inspecting calls to intrinsic functions.
Analysis providing profile information.
This class represents the LLVM 'select' instruction.
Implements a dense probed hash-table based set with some number of buckets stored inline.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool match(Val *V, const Pattern &P)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
This is an optimization pass for GlobalISel generic memory operations.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &DL, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Get the upper bound on bit size for this Value Op as a signed integer.
SimplifyQuery getWithInstruction(const Instruction *I) const