#define DEBUG_TYPE "instcombine"
    cl::desc("Maximum number of phis to handle in intptr/ptrint folding"));
STATISTIC(NumPHIsOfInsertValues,
          "Number of phi-of-insertvalue turned into insertvalue-of-phis");
STATISTIC(NumPHIsOfExtractValues,
          "Number of phi-of-extractvalue turned into extractvalue-of-phi");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
assert(!isa<CallInst>(Inst));
auto *I = cast<Instruction>(V);
auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
for (User *U : IIP->users()) {
  if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
    Ptr = LoadI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
    Ptr = SI->getPointerOperand();
    Ptr = GI->getPointerOperand();
if (!HasPointerUse(IntToPtr))
Value *Arg = std::get<1>(Incoming);
if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
Value *ArgIntToPtr = nullptr;
if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
    cast<Instruction>(U)->getParent() == BB)) {
if (isa<PHINode>(Arg)) {
auto *LoadI = dyn_cast<LoadInst>(Arg);
if (!LoadI->hasOneUse())
       "Not enough available ptr typed incoming values");
PHINode *MatchingPtrPHI = nullptr;
unsigned NumPhis = 0;
for (PHINode &PtrPHI : BB->phis()) {
  if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
      [&](const auto &BlockAndValue) {
        BasicBlock *BB = std::get<0>(BlockAndValue);
        Value *V = std::get<1>(BlockAndValue);
        return PtrPHI.getIncomingValueForBlock(BB) != V;
  MatchingPtrPHI = &PtrPHI;
if (MatchingPtrPHI) {
       "Phi's Type does not match with IntToPtr");
return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
if (V->getType() == IntToPtr->getType())
auto *Inst = dyn_cast<Instruction>(V);
if (Inst->isTerminator())
auto *BB = Inst->getParent();
if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
  auto *IncomingBB = std::get<0>(Incoming);
  auto *IncomingVal = std::get<1>(Incoming);
  if (IncomingVal->getType() == IntToPtr->getType()) {
  LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
  assert((isa<PHINode>(IncomingVal) ||
          IncomingVal->getType()->isPointerTy() ||
         "Can not replace LoadInst with multiple uses");
      IncomingVal->getName() + ".ptr");
  if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
    if (isa<PHINode>(IncomingI))
    assert(InsertPos != BB->end() && "should have checked above");
  auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
bool OperandWithRoundTripCast = false;
    OperandWithRoundTripCast = true;
if (!OperandWithRoundTripCast)
auto *I = dyn_cast<InsertValueInst>(V);
if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
std::array<PHINode *, 2> NewOperands;
for (int OpIdx : {0, 1}) {
  auto *&NewOperand = NewOperands[OpIdx];
      FirstIVI->getOperand(OpIdx)->getName() + ".pn");
  NewOperand->addIncoming(
      cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
      std::get<0>(Incoming));
    FirstIVI->getIndices(), PN.getName());
++NumPHIsOfInsertValues;
auto *I = dyn_cast<ExtractValueInst>(V);
if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
    I->getAggregateOperand()->getType() !=
        FirstEVI->getAggregateOperand()->getType())
      FirstEVI->getAggregateOperand()->getName() + ".pn");
  NewAggregateOperand->addIncoming(
      cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
      std::get<0>(Incoming));
    FirstEVI->getIndices(), PN.getName());
++NumPHIsOfExtractValues;
assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
    I->getOperand(0)->getType() != LHSType ||
    I->getOperand(1)->getType() != RHSType)
if (CmpInst *CI = dyn_cast<CmpInst>(I))
  if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
if (!LHSVal && !RHSVal)
PHINode *NewLHS = nullptr, *NewRHS = nullptr;
if (NewLHS || NewRHS) {
  Value *InVal = std::get<1>(Incoming);
  NewRHS->addIncoming(NewInRHS, InBB);
if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
bool AllBasePointersAreAllocas = true;
bool NeededPhi = false;
bool AllInBounds = true;
if (!GEP || !GEP->hasOneUser() ||
AllInBounds &= GEP->isInBounds();
if (AllBasePointersAreAllocas &&
    (!isa<AllocaInst>(GEP->getOperand(0)) ||
     !GEP->hasAllConstantIndices()))
  AllBasePointersAreAllocas = false;
    isa<ConstantInt>(GEP->getOperand(Op)))
    GEP->getOperand(Op)->getType())
  FixedOperands[Op] = nullptr;
if (AllBasePointersAreAllocas)
bool HasAnyPHIs = false;
for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
  if (FixedOperands[I])
  OperandPhis[I] = NewPN;
  FixedOperands[I] = NewPN;
Value *InVal = std::get<1>(Incoming);
for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
for (++BBI; BBI != E; ++BBI)
  if (BBI->mayWriteToMemory()) {
    if (auto *CB = dyn_cast<CallBase>(BBI))
      if (CB->onlyAccessesInaccessibleMemory())
if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
  bool IsAddressTaken = false;
  for (User *U : AI->users()) {
    if (isa<LoadInst>(U))
      continue;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(1) == AI)
        continue;
    IsAddressTaken = true;
  if (!IsAddressTaken && AI->isStaticAlloca())
if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
  if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
Value *InVal = std::get<1>(Incoming);
if (!LI || !LI->hasOneUser() || LI->isAtomic())
if (LI->isVolatile() != IsVolatile ||
    LI->getPointerAddressSpace() != LoadAddrSpace)
if (LI->getOperand(0)->isSwiftError())
LoadAlignment = std::min(LoadAlignment, LI->getAlign());
if (IsVolatile &&
    LI->getParent()->getTerminator()->getNumSuccessors() != 1)
    new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_range,
    LLVMContext::MD_invariant_load,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
    LLVMContext::MD_nonnull,
    LLVMContext::MD_align,
    LLVMContext::MD_dereferenceable,
    LLVMContext::MD_dereferenceable_or_null,
    LLVMContext::MD_access_group,
    LLVMContext::MD_noundef,
for (unsigned ID : KnownIDs)
Value *V = std::get<1>(Incoming);
Value *NewInVal = LI->getOperand(0);
if (NewInVal != InVal)
cast<LoadInst>(IncValue)->setVolatile(false);
if (Instruction *TI = Phi.getParent()->getTerminator())
unsigned NumIncomingValues = Phi.getNumIncomingValues();
if (NumIncomingValues < 3)
Type *NarrowType = nullptr;
for (Value *V : Phi.incoming_values()) {
  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
    NarrowType = Zext->getSrcTy();
unsigned NumZexts = 0;
unsigned NumConsts = 0;
for (Value *V : Phi.incoming_values()) {
  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
    if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
    NewIncoming.push_back(Zext->getOperand(0));
  } else if (auto *C = dyn_cast<Constant>(V)) {
if (NumConsts == 0 || NumZexts < 2)
    Phi.getName() + ".shrunk");
for (unsigned I = 0; I != NumIncomingValues; ++I)
  NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));
if (isa<GetElementPtrInst>(FirstInst))
if (isa<LoadInst>(FirstInst))
if (isa<InsertValueInst>(FirstInst))
if (isa<ExtractValueInst>(FirstInst))
Type *CastSrcTy = nullptr;
if (isa<CastInst>(FirstInst)) {
  if (!shouldChangeType(PN.getType(), CastSrcTy))
} else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
  ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
  if (I->getOperand(0)->getType() != CastSrcTy)
} else if (I->getOperand(1) != ConstantOp) {
Value *V = std::get<1>(Incoming);
Value *NewInVal = cast<Instruction>(V)->getOperand(0);
if (NewInVal != InVal)
if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
  BinOp->andIRFlags(V);
CmpInst *CIOp = cast<CmpInst>(FirstInst);
if (!PotentiallyDeadPHIs.insert(PN).second)
if (PotentiallyDeadPHIs.size() == 16)
if (!ValueEqualPHIs.insert(PN).second)
if (ValueEqualPHIs.size() == 16)
if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
} else if (Op != NonPhiInVal)
assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
if (auto *ConstVA = dyn_cast<ConstantInt>(V))
  if (!ConstVA->isZero())
struct PHIUsageRecord {
      : PHIId(Pn), Shift(Sh), Inst(User) {}
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
struct LoweredPHIRecord {
  LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
      : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
  LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
    return LoweredPHIRecord(nullptr, 0);
    return LoweredPHIRecord(nullptr, 1);
                        const LoweredPHIRecord &RHS) {
PHIsInspected.insert(&FirstPhi);
for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
  PHINode *PN = PHIsToSlice[PHIId];
  Value *V = std::get<1>(Incoming);
  for (auto *Pred : PN->blocks())
    if (Pred->getFirstInsertionPt() == Pred->end())
  if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
    if (PHIsInspected.insert(UserPN).second)
  if (isa<TruncInst>(UserI)) {
    PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
  if (UserI->getOpcode() != Instruction::LShr ||
  if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
  unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
if (PHIUsers.empty())
for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
    << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');
for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
  unsigned PHIId = PHIUsers[UserI].PHIId;
  PHINode *PN = PHIsToSlice[PHIId];
  unsigned Offset = PHIUsers[UserI].Shift;
  Type *Ty = PHIUsers[UserI].Inst->getType();
  if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
           "Truncate didn't shrink phi?");
    Value *InVal = std::get<1>(Incoming);
    Value *&PredVal = PredValues[Pred];
    if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
      if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
    if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
      if (PHIsInspected.count(OldInVal)) {
            find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
            PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
      << *EltPHI << '\n');
  ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
SuccForValue[C] = Succ;
if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
  if (BI->isUnconditional())
  Cond = BI->getCondition();
} else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
  Cond = SI->getCondition();
  ++SuccCount[SI->getDefaultDest()];
  for (auto Case : SI->cases())
    AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
std::optional<bool> Invert;
auto *Input = cast<ConstantInt>(std::get<0>(Pair));
auto It = SuccForValue.find(Input);
return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
if (IsCorrectInput(Input))
  NeedsInvert = false;
if (Invert && *Invert != NeedsInvert)
Invert = NeedsInvert;
if (InsertPt != BB->end()) {
if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
    Inst0->hasOneUser())
if (IV0 != IV0Stripped &&
    return !CheckedIVs.insert(IV).second ||
           IV0Stripped == IV->stripPointerCasts();
if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
  PotentiallyDeadPHIs.insert(&PN);
(isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
bool MadeChange = false;
if (NonZeroConst != VA) {
while (InValNo != NumIncomingVals &&
if (InValNo != NumIncomingVals) {
for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
  if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
if (InValNo == NumIncomingVals) {
if (&IdenticalPN == &PN)
This file provides internal interfaces used to implement the InstCombine pass.
static ConstantInt * getAnyNonZeroConstInt(PHINode &PN)
Return an existing non-zero constant if this phi node has one, otherwise return constant 1.
static bool isDeadPHICycle(PHINode *PN, SmallPtrSetImpl< PHINode * > &PotentiallyDeadPHIs)
Return true if this PHI node is only used by a PHI node cycle that is dead.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it.
static Value * simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN, const DominatorTree &DT)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, SmallPtrSetImpl< PHINode * > &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
static cl::opt< unsigned > MaxNumPhis("instcombine-max-num-phis", cl::init(512), cl::desc("Maximum number of phis to handle in intptr/ptrint folding"))
This file provides the interface for the instcombine pass implementation.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
BinaryOps getOpcode() const
This is the base class for all instructions that perform data casts.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
This class is the base class for the comparison instructions.
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
static CmpInst * Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getPredicate() const
Return the predicate for this instruction.
OtherOps getOpcode() const
Get the opcode casted to the right type.
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getNot(Constant *C)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
This class represents an Operation in the Expression.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
iterator find(const_arg_type_t< KeyT > Val)
DomTreeNodeBase * getIDom() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Type * getSourceElementType() const
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateNot(Value *V, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Instruction * foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)], turn this into a phi[a,...
Instruction * foldPHIArgBinOpIntoPHI(PHINode &PN)
If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the adds all have a single user,...
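A minimal sketch of the rewritten shape this fold produces, with an assumed standalone helper (buildBinOpOfPHIs is hypothetical, not the pass's entry point); the real fold only materializes a new phi for an operand position that actually differs across edges, as the NewLHS/NewRHS handling in the listing above shows:
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
// Hypothetical helper: rebuild "phi of binops" as "binop of phis".
static llvm::Value *buildBinOpOfPHIs(llvm::PHINode &PN,
                                     llvm::Instruction::BinaryOps Opc) {
  auto *FirstBO = llvm::cast<llvm::BinaryOperator>(PN.getIncomingValue(0));
  llvm::IRBuilder<> B(&PN);
  // One new phi per operand position, with one incoming value per predecessor.
  llvm::PHINode *LHS = B.CreatePHI(FirstBO->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(), "lhs.pn");
  llvm::PHINode *RHS = B.CreatePHI(FirstBO->getOperand(1)->getType(),
                                   PN.getNumIncomingValues(), "rhs.pn");
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    auto *BO = llvm::cast<llvm::BinaryOperator>(PN.getIncomingValue(i));
    LHS->addIncoming(BO->getOperand(0), PN.getIncomingBlock(i));
    RHS->addIncoming(BO->getOperand(1), PN.getIncomingBlock(i));
  }
  // The merged binop must sit after all phis; this assumes the block has a
  // non-PHI insertion point (the pass checks this before folding).
  B.SetInsertPoint(&*PN.getParent()->getFirstInsertionPt());
  return B.CreateBinOp(Opc, LHS, RHS, PN.getName());
}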
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitPHINode(PHINode &PN)
Instruction * foldPHIArgOpIntoPHI(PHINode &PN)
Try to rotate an operation below a PHI node, using PHI nodes for its operands.
Instruction * foldPHIArgZextsIntoPHI(PHINode &PN)
TODO: This function could handle other cast types, but then it might require special-casing a cast fr...
Instruction * foldPHIArgLoadIntoPHI(PHINode &PN)
bool foldIntegerTypedPHI(PHINode &PN)
If an integer typed PHI has only one use which is an IntToPtr operation, replace the PHI with an exis...
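A minimal sketch (helper name and structure assumed for illustration) of the pattern this fold looks for: an integer phi whose single user is an inttoptr, with at least one incoming ptrtoint whose source pointer type matches, i.e. a pointer value that merely round-trips through the integer phi:
#include "llvm/IR/Instructions.h"
// Hypothetical helper: does PN carry a pointer round-tripped through integers?
static bool hasPtrIntRoundTrip(llvm::PHINode &PN) {
  if (!PN.hasOneUse())
    return false;
  auto *ITP = llvm::dyn_cast<llvm::IntToPtrInst>(PN.user_back());
  if (!ITP)
    return false;
  for (llvm::Value *In : PN.incoming_values())
    if (auto *PTI = llvm::dyn_cast<llvm::PtrToIntInst>(In))
      if (PTI->getOperand(0)->getType() == ITP->getType())
        return true; // this edge just round-trips a pointer through the phi
  return false;
}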
Instruction * foldPHIArgIntToPtrToPHI(PHINode &PN)
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
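A minimal sketch (names assumed) of how a user of the wide phi maps to the (bit offset, narrow width) slice that the PHIUsageRecord entries record: a plain trunc reads the low bits, and a trunc of an lshr by a constant reads a field starting at that bit. The pass itself walks the phi's direct users (trunc, or lshr whose users are truncs), but the offset/width mapping is the same:
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
// Hypothetical helper: classify one user of an illegal-width phi as a slice.
static bool getSlice(llvm::User *U, unsigned &Offset, unsigned &Width) {
  using namespace llvm::PatternMatch;
  auto *TI = llvm::dyn_cast<llvm::TruncInst>(U);
  if (!TI)
    return false;
  Width = TI->getType()->getScalarSizeInBits();
  llvm::ConstantInt *ShAmt;
  // trunc (lshr %wide, C) reads Width bits starting at bit C.
  if (match(TI->getOperand(0), m_LShr(m_Value(), m_ConstantInt(ShAmt)))) {
    Offset = (unsigned)ShAmt->getZExtValue();
    return true;
  }
  // A plain trunc reads the low Width bits.
  Offset = 0;
  return true;
}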
Instruction * foldPHIArgGEPIntoPHI(PHINode &PN)
void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN)
Helper function for FoldPHIArgXIntoPHI() to set debug location for the folded operation.
Instruction * foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN)
If we have something like phi [extractvalue(a,0), extractvalue(b,0)], turn this into a phi[a,...
The core instruction combiner logic.
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
const BasicBlock * getParent() const
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
bool isIdenticalToWhenDefined(const Instruction *I) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void applyMergedLocation(DILocation *LocA, DILocation *LocB)
Merge 2 debug locations and apply it to the Instruction.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Align getAlign() const
Return the alignment of the access that is being performed.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
void setIncomingBlock(unsigned i, BasicBlock *BB)
void setIncomingValue(unsigned i, Value *V)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
bool isSwiftError() const
Return true if this value is a swifterror value.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
self_iterator getIterator()
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
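The listing above leans heavily on this idiom to walk a phi's (predecessor, incoming value) pairs in lockstep; a minimal sketch (function name assumed):
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Instructions.h"
// Hypothetical helper: visit every (block, value) pair of a phi, skipping the
// first pair when it has already been handled separately (drop_begin).
static void visitIncoming(llvm::PHINode &PN) {
  for (auto Incoming :
       llvm::drop_begin(llvm::zip(PN.blocks(), PN.incoming_values()))) {
    llvm::BasicBlock *BB = std::get<0>(Incoming);
    llvm::Value *V = std::get<1>(Incoming);
    (void)BB;
    (void)V; // inspect the pair here
  }
}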
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs, bool DoesKMove)
Combine the metadata of two instructions so that K can replace J.
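This is how the merged load built by foldPHIArgLoadIntoPHI keeps only metadata that is valid on every incoming load; a minimal sketch (variable names assumed), restricted to the KnownIDs list shown earlier:
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
// Hypothetical helper: intersect one incoming load's metadata into the merged
// load. Kinds outside KnownIDs are left alone; kinds that are unsafe to keep
// once the replacement load has moved are dropped (DoesKMove = true).
static void mergeLoadMetadata(llvm::LoadInst *NewLI, const llvm::LoadInst *LI,
                              llvm::ArrayRef<unsigned> KnownIDs) {
  llvm::combineMetadata(NewLI, LI, KnownIDs, /*DoesKMove=*/true);
}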
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
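The PHIUsageRecord::operator< shown earlier exists to drive this sort: ordering users by (PHIId, Shift) groups all users of one illegal phi together and visits them lowest offset first. A minimal self-contained sketch with an assumed stand-in record type:
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <tuple>
struct UsageRec { // hypothetical stand-in for PHIUsageRecord
  unsigned PHIId;
  unsigned Shift;
  bool operator<(const UsageRec &RHS) const {
    return std::tie(PHIId, Shift) < std::tie(RHS.PHIId, RHS.Shift);
  }
};
static void sortUsers(llvm::SmallVectorImpl<UsageRec> &Users) {
  // array_pod_sort sorts via qsort using operator<; unlike std::sort it is
  // not a stable sort, which is fine here since records are ordered fully.
  llvm::array_pod_sort(Users.begin(), Users.end());
}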
This struct is a compact representation of a valid (non-zero power of two) alignment.
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
static unsigned getHashValue(const LoweredPHIRecord &Val)
static LoweredPHIRecord getEmptyKey()
static LoweredPHIRecord getTombstoneKey()
An information struct used to provide DenseMap with the various necessary components for a given valu...
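The getEmptyKey/getTombstoneKey/getHashValue/isEqual entries above are the four hooks a DenseMapInfo specialization must provide so LoweredPHIRecord can key the ExtractedVals map. A minimal sketch of the same pattern for an assumed two-field key type:
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
struct SliceKey { // hypothetical illustration; real keys have a non-null Ptr
  void *Ptr;
  unsigned Shift;
};
namespace llvm {
template <> struct DenseMapInfo<SliceKey> {
  // Two values that can never be real keys, reserved for empty/tombstone slots.
  static SliceKey getEmptyKey() { return {nullptr, 0u}; }
  static SliceKey getTombstoneKey() { return {nullptr, 1u}; }
  static unsigned getHashValue(const SliceKey &K) {
    return static_cast<unsigned>(hash_combine(K.Ptr, K.Shift));
  }
  static bool isEqual(const SliceKey &LHS, const SliceKey &RHS) {
    return LHS.Ptr == RHS.Ptr && LHS.Shift == RHS.Shift;
  }
};
} // namespace llvm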
SimplifyQuery getWithInstruction(Instruction *I) const