using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
while (!ValuesToInspect.empty()) {
  auto ValuePair = ValuesToInspect.pop_back_val();
  const bool IsOffset = ValuePair.second;
  for (auto &U : ValuePair.first->uses()) {
    auto *I = cast<Instruction>(U.getUser());

    if (auto *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads; they are always ok.
      if (!LI->isSimple()) return false;
      continue;
    }

    if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
      // If uses of the cast are ok, we are ok.
      ValuesToInspect.emplace_back(I, IsOffset);
      continue;
    }
    if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.
      ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
      continue;
    }
    if (auto *Call = dyn_cast<CallBase>(I)) {
      // If this is the function being called then we treat it like a load
      // and ignore it.
      if (Call->isCallee(&U))
        continue;

      unsigned DataOpNo = Call->getDataOperandNo(&U);
      bool IsArgOperand = Call->isArgOperand(&U);

      // Inalloca arguments are clobbered by the call.
      if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
        return false;

      // If this call site doesn't modify memory, then we know it is just
      // a load (but one that potentially returns the value itself), so we
      // can ignore it if we know that the value isn't captured.
      if (Call->onlyReadsMemory() &&
          (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
        continue;

      // If this is being passed as a byval argument, the callee is allowed
      // to read from it, which is like a load.
      if (IsArgOperand && Call->isByValArgument(DataOpNo))
        continue;
    }

    // Lifetime intrinsics can be handled by the caller.
    if (I->isLifetimeStartOrEnd()) {
      assert(I->use_empty() && "Lifetime markers have no result to use!");
      ToDelete.push_back(I);
      continue;
    }

    // Anything else that isn't our memcpy/memmove is rejected.
    MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
    if (!MI)
      return false;

    // If the transfer is using the alloca as a source of the transfer, then
    // ignore it since it is a load (unless the transfer is volatile).
    if (U.getOperandNo() == 1) {
      if (MI->isVolatile()) return false;
      continue;
    }

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (IsOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (U.getOperandNo() != 0) return false;

    // If the source of the memcpy/memmove is not constant, reject it.
    if (!AA->pointsToConstantMemory(MI->getSource()))
      return false;

    // Otherwise, the transform is safe. Remember the copy instruction.
    TheCopy = MI;
  }
}
return true;
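// simplifyAllocaArraySize (fragment): if the alloca's array size is a
// constant that fits in 64 bits, rewrite "alloca T, N" into an
// "alloca [N x T]" plus an inbounds GEP to its first element.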
if (C->getValue().getActiveBits() <= 64) {
  // ... (create the new, explicitly sized array alloca 'New') ...

  // Scan past the block of allocas and interleaved debug info so the GEP is
  // inserted after all allocation instructions.
  while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
    ++It;

  Value *Idx[2] = {NullIdx, NullIdx};
  Instruction *GEP = GetElementPtrInst::CreateInBounds(
      NewTy, New, Idx, New->getName() + ".sub");
class PointerReplacer { /* ... declaration elided in this listing ... */ };
bool PointerReplacer::collectUsers(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsers(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }
  return true;
}

Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
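// replace() clones one collected user with its pointer operand swapped for
// the replacement pointer, preserving the load attributes and AA metadata
// of the original instruction.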
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    // ...
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    // ... (build the replacement GEP 'NewI' on top of V) ...
    IC.InsertNewInstWith(NewI, *GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::getWithSamePointeeType(
        cast<PointerType>(BC->getType()),
        V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    WorkMap[BC] = NewI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *SrcV = getReplacement(MemCpy->getRawSource());
    // The pointer may appear only in the destination of the copy.
    if (!SrcV) {
      assert(getReplacement(MemCpy->getRawDest()) &&
             "destination not in replace list");
      return;
    }

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
        SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
        MemCpy->isVolatile());
    AAMDNodes AAMD = MemCpy->getAAMetadata();
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->hasSameElementTypeAs(NT) && "Invalid usage");
#endif
  WorkMap[&I] = V;
  for (Instruction *Workitem : Worklist)
    replace(Workitem);
}
Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
  // ...
  // For a zero-sized alloca there is no point in doing an array allocation.
  if (AI.isArrayAllocation())
    return replaceOperand(AI, 0,
                          ConstantInt::get(AI.getArraySize()->getType(), 1));
  // ...
  if (FirstInst != &AI) {
    // If the entry block doesn't start with a zero-size alloca then move
    // this one to the start of the entry block.
    AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
    if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
        DL.getTypeAllocSize(EntryAI->getAllocatedType())
                .getKnownMinSize() != 0) {
      AI.moveBefore(FirstInst);
      return &AI;
    }
    // ...
    return replaceInstUsesWith(AI, EntryAI);
  }
  // ...
  Value *TheSrc = Copy->getSource();
  Align SourceAlign = getOrEnforceKnownAlignment(
      TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
  if (AllocaAlign <= SourceAlign &&
      isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
      !isa<Instruction>(TheSrc)) {
    LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
    // ...
    for (Instruction *Delete : ToDelete)
      eraseInstFromFunction(*Delete);
    // ...
    eraseInstFromFunction(*Copy);
    // ...
    PointerReplacer PtrReplacer(*this);
    if (PtrReplacer.collectUsers(AI)) {
      for (Instruction *Delete : ToDelete)
        eraseInstFromFunction(*Delete);
      PtrReplacer.replacePointer(AI, Cast);
    }
  }
  return visitAllocSite(AI);
}
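// combineLoadToNewType: helper to combine a load to a new type, rewriting
// the pointer operand and carrying the load's metadata over to the
// replacement.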
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  // ...
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType() == NewPtrTy))
    NewPtr = Builder.CreateBitCast(Ptr, NewPtrTy);
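// combineStoreToNewValue: combine a store to a new type; returns the new
// store, carrying over every metadata kind that is still valid when only
// the stored type changes.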
486 "can't fold an atomic store of requested type");
488 Value *Ptr =
SI.getPointerOperand();
489 unsigned AS =
SI.getPointerAddressSpace();
491 SI.getAllMetadata(MD);
495 SI.getAlign(),
SI.isVolatile());
497 for (
const auto &MDPair : MD) {
498 unsigned ID = MDPair.first;
509 case LLVMContext::MD_dbg:
510 case LLVMContext::MD_tbaa:
511 case LLVMContext::MD_prof:
512 case LLVMContext::MD_fpmath:
513 case LLVMContext::MD_tbaa_struct:
514 case LLVMContext::MD_alias_scope:
515 case LLVMContext::MD_noalias:
516 case LLVMContext::MD_nontemporal:
517 case LLVMContext::MD_mem_parallel_loop_access:
518 case LLVMContext::MD_access_group:
522 case LLVMContext::MD_invariant_load:
523 case LLVMContext::MD_nonnull:
524 case LLVMContext::MD_noundef:
525 case LLVMContext::MD_range:
526 case LLVMContext::MD_align:
527 case LLVMContext::MD_dereferenceable:
528 case LLVMContext::MD_dereferenceable_or_null:
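// combineLoadToOperationType: combine loads to match the type of their
// uses' value after looking through intervening bitcasts, except for casts
// that would turn into type punning (pointer<->integer) or involve x86_amx.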
if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
  assert(!LI.getType()->isX86_AMXTy() &&
         "load from x86_amx* should not happen!");
  if (BC->getType()->isX86_AMXTy())
    return nullptr;
}

if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
  if (CI->isNoopCast(DL) && LI.getType()->isPtrOrPtrVectorTy() ==
                                CI->getDestTy()->isPtrOrPtrVectorTy())
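// unpackLoadToAggregate: split a load of a single-element or padding-free
// struct/array into loads of the individual elements, repacked with
// insertvalue.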
if (!T->isAggregateType())
  return nullptr;

if (auto *ST = dyn_cast<StructType>(T)) {
  // If the struct only has one element, we unpack.
  auto NumElements = ST->getNumElements();
  if (NumElements == 1) {
    // ...
  }

  // Don't unpack structs with padding: that would lose the knowledge that
  // padding exists for the rest of the pipeline.
  auto *SL = DL.getStructLayout(ST);
  if (SL->hasPadding())
    return nullptr;

  // ...
  for (unsigned i = 0; i < NumElements; i++) {
    Value *Indices[2] = {
        // ...
    };
    // ... (GEP to element i, then load it) ...
    auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                           EltAlign, Name + ".unpack");
    // ...
  }
}

if (auto *AT = dyn_cast<ArrayType>(T)) {
  auto *ET = AT->getElementType();
  auto NumElements = AT->getNumElements();
  if (NumElements == 1) {
    // ...
  }
  // ...
  auto EltSize = DL.getTypeAllocSize(ET);
  // ...
  Value *Indices[2] = {
      // ...
  };
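// isObjectSizeLessThanOrEq: returns true if every object that V may point
// to (looking through selects, phis, and aliases) is provably no larger
// than MaxSize bytes.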
do {
  Value *P = Worklist.pop_back_val();
  P = P->stripPointerCasts();
  // ...
  if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
    Worklist.push_back(SI->getTrueValue());
    Worklist.push_back(SI->getFalseValue());
    continue;
  }

  if (PHINode *PN = dyn_cast<PHINode>(P)) {
    append_range(Worklist, PN->incoming_values());
    continue;
  }

  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
    if (GA->isInterposable())
      return false;
    Worklist.push_back(GA->getAliasee());
    continue;
  }

  if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
    if (!AI->getAllocatedType()->isSized())
      return false;
    ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
    // ... (reject non-constant or too-large array sizes) ...
  }

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
    if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
      return false;

    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    if (InitSize > MaxSize)
      return false;
    continue;
  }
  // ...
} while (!Worklist.empty());
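// canReplaceGEPIdxWithZero: checks whether the first non-zero index of a
// GEP feeding this load/store must be zero for the access to stay in bounds
// (e.g. a one-element allocation), so it can be replaced by zero;
// replaceGEPIdxWithZero below performs the rewrite.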
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
    if (CI->isZero())
      continue;
  // ...
  Idx = FirstNZIdx(GEPI);
  // ...
  if (isa<ScalableVectorType>(SourceElementType))
    return false;
  // ...
  if (!AllocTy || !AllocTy->isSized())
    return false;

  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedSize();
  // ...
  auto IsAllNonNegative = [&]() {
    // ...
  };

template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
                                          T &MemI) {
  // ...
  MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
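// canSimplifyNullStoreOrGEP / canSimplifyNullLoadOrGEP: detect stores and
// loads through a null or undef pointer (possibly behind a GEP) in address
// spaces where null dereference is undefined, so SimplifyCFG can later turn
// them into 'unreachable'.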
  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(),
                                SI.getPointerAddressSpace()));

// ...
  const Value *GEPI0 = GEPI->getOperand(0);
  if (isa<ConstantPointerNull>(GEPI0) &&
      !NullPointerIsDefined(LI.getFunction(),
                            GEPI->getPointerAddressSpace()))
    return true;
// ...
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(),
                             LI.getPointerAddressSpace())))
    return true;
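// visitLoadInst: canonicalize the loaded type, zero trailing GEP indices,
// forward previously stored/loaded values (load CSE), and sink loads into
// selects when both arms are safe to load from.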
Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);
  // ...
  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.push(NewGEPI);
    return &LI;
  }
  // ...
  // Do really simple store-to-load forwarding and load CSE.
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
    // ...
    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }
  // ...
  if (Op->hasOneUse()) {
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        // ...
        V2->setAlignment(Alignment);
        // ...
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
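// likeBitCastFromVector: look for an extractelement/insertvalue sequence
// that acts like a bitcast from a vector to a bitwise-isomorphic struct or
// array, and return the equivalent bitcast if so.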
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  // ...
  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
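// combineStoreToValueType: combine stores to match the type of value being
// stored, looking through a bitcast of the stored value (but never for
// volatile/ordered stores or swifterror pointers).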
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
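// unpackStoreToAggregate: split a store of a single-element or padding-free
// struct/array into stores of the extracted elements.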
  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      // ...
    }

    // Don't unpack structs with padding: that would lose the knowledge that
    // padding exists for the rest of the pipeline.
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";
    // ...
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
          // ...
      };
      // ... (GEP to element i, store the extracted value) ...
    }
    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";
    // ...
    Value *Indices[2] = {
        // ...
    };
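// equivalentAddressValues: test if A and B will obviously have the same
// value, catching trivial identity and identical instructions within one
// basic block.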
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;
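// removeBitcastsFromLoadStoreOnMinMax: converts
//   store (bitcast (load (bitcast (select ...))))
// to
//   store (load (select ...))
// so min/max patterns over loaded values keep a single type.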
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  // ...
  // Make sure the type would actually change; this condition can be hit
  // with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;
  // ...
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
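// visitStoreInst: canonicalize the stored type, improve alignment, unpack
// aggregate stores, zero GEP indices, perform simple dead-store
// elimination, and simplify stores through null/undef pointers.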
Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.push(NewGEPI);
    return &SI;
  }
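// The scan below does really simple dead store elimination: walk up to six
// instructions backwards from the store looking for an earlier store to the
// same location that nothing in between can observe.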
  // Don't hack volatile/ordered stores.
  if (!SI.isUnordered())
    return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (isa<AllocaInst>(Ptr))
    return eraseInstFromFunction(SI);
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (isa<AllocaInst>(GEP->getOperand(0))) {
      if (GEP->getOperand(0)->hasOneUse())
        return eraseInstFromFunction(SI);
    }
  }

  // A store to constant memory must be storing the value the memory already
  // holds, so it is a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, and skip pointer-to-pointer
    // bitcasts, which are NOPs.
    if (BBI->isDebugOrPseudoInst() ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
          PrevSI->getValueOperand()->getType() ==
              SI.getValueOperand()->getType()) {
        // ...
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is
    // from the pointer we're storing to, this store is dead
    // (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null / store X, GEP(null, Y) -> turns into 'unreachable' in
  // SimplifyCFG.
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<PoisonValue>(Val))
      return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);
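// mergeStoreIntoSuccessor: try to transform
//   if () { *P = v1; } else { *P = v2; }   or   *P = v1; if () { *P = v2; }
// into a phi node feeding a single store in the common successor block.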
  if (!SI.isUnordered())
    return false; // This code hasn't been audited for ordered/volatile cases.

  // ...
  // Capture the other block (the block that doesn't contain our store).
  if (*PredIter == StoreBB)
    OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the
  // 'if/then/else' case: there is an instruction before the branch.
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info and pointer bitcasts.
    while (BBI->isDebugOrPseudoInst() ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, or isn't a store to the same location, or isn't
    // the right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise this is the 'if/then' case: look for the matching store in
    // OtherBB.
    for (;; --BBI) {
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // To eliminate the store in OtherBB, nothing may read or overwrite the
    // stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
                    SI.getAlign(), SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  // ...

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}