#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(300),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);
// In isOnlyCopiedFromConstantMemory(): recursively walk the uses of the
// alloca with an explicit worklist, tracking for each use whether it is at
// an offset from the start of the allocation.
while (!Worklist.empty()) {
  ...
  if (!Visited.insert(Elem).second)
    ...
  const auto [Value, IsOffset] = Elem;
  ...
  if (!LI->isSimple())
    return false;
  ...
  if (Call->isCallee(&U))
    ...
  unsigned DataOpNo = Call->getDataOperandNo(&U);
  bool IsArgOperand = Call->isArgOperand(&U);
  ...
  if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
    ...
  bool NoCapture = Call->doesNotCapture(DataOpNo);
  ... (Call->onlyReadsMemory() || Call->onlyReadsMemory(DataOpNo)) ...
  if (I->isLifetimeStartOrEnd()) {
    assert(I->use_empty() && "Lifetime markers have no result to use!");
    ...
  }
  if (MI->isVolatile())
    ...
  if (U.getOperandNo() == 1)
    ...
  if (TheCopy)
    return false;
  ...
  if (IsOffset)
    return false;
  ...
  if (U.getOperandNo() != 0)
    return false;
  ...
}

// In isDereferenceableForAllocaSize():
if (!AllocaSize || AllocaSize->isScalable())
  ...

// In simplifyAllocaArraySize():
if (C->getValue().getActiveBits() <= 64) {
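The loop above is the standard worklist pattern: pair each value with its IsOffset bit, dedupe through a visited set, and bail out conservatively on anything unrecognized (the MaxCopiedFromConstantUsers cap bounds the walk). A self-contained sketch of the same shape, with hypothetical Node and onlyBenignUsers names:

#include <set>
#include <utility>
#include <vector>

struct Node {
  std::vector<Node *> Users;
  bool OffsetsPointer = false; // e.g. a GEP with a non-zero index
};

bool onlyBenignUsers(Node *Root, unsigned MaxUsers) {
  std::vector<std::pair<Node *, bool>> Worklist{{Root, false}};
  std::set<std::pair<Node *, bool>> Visited;
  unsigned NumVisited = 0;
  while (!Worklist.empty()) {
    auto Elem = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(Elem).second)
      continue; // already seen with this offset state
    if (++NumVisited > MaxUsers)
      return false; // over budget: give up conservatively
    auto [V, IsOffset] = Elem;
    for (Node *U : V->Users)
      Worklist.push_back({U, IsOffset || U->OffsetsPointer});
  }
  return true;
}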
class PointerReplacer {
  ...
  PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
      : IC(IC), Root(Root), FromAS(SrcAS) {}
  ...
  void replacePointer(Value *V);
  ...
  Value *getReplacement(Value *V) const { return WorkMap.lookup(V); }

  // Body of isAvailable(): an instruction can feed a replacement once it is
  // the root or has itself been marked for replacement.
  return I == &Root || UsersToReplace.contains(I);

  bool isEqualOrValidAddrSpaceCast(const Instruction *I,
                                   unsigned FromAS) const {
    ...
    unsigned ToAS = ASC->getDestAddressSpace();
    return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
  }
  ...
  SmallSetVector<Instruction *, 32> UsersToReplace;
  MapVector<Value *, Value *> WorkMap;
  InstCombinerImpl &IC;
  ...
};
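getReplacement() above leans on MapVector::lookup returning a value-initialized result (nullptr for pointer values) when the key is absent, so "no replacement yet" needs no separate contains() check. A small sketch of that idiom:

#include "llvm/ADT/MapVector.h"
#include <cassert>

int main() {
  llvm::MapVector<int *, int *> WorkMap;
  int A = 1, B = 2;
  WorkMap[&A] = &B;
  assert(WorkMap.lookup(&A) == &B);      // mapped key
  assert(WorkMap.lookup(&B) == nullptr); // absent key -> null default
  return 0;
}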
bool PointerReplacer::collectUsers() {
  ...
  SmallSetVector<Instruction *, 32> ValuesToRevisit;
  ...
  auto PushUsersToWorklist = [&](Instruction *Inst) {
    for (auto *U : Inst->users())
      ...
  };
  ...
  auto TryPushInstOperand = [&](Instruction *InstOp) {
    if (!UsersToReplace.contains(InstOp)) {
      if (!ValuesToRevisit.insert(InstOp))
        ...
  };
  ...
  PushUsersToWorklist(&Root);
  while (!Worklist.empty()) {
    ...
    // Loads are replaceable unless volatile.
    if (Load->isVolatile())
      ...
    UsersToReplace.insert(Load);
    ...
    // A PHI is replaceable only once every incoming value is an instruction
    // that is itself available; otherwise it is parked for a revisit.
    bool IsReplaceable = all_of(PHI->incoming_values(),
                                [](Value *V) { return isa<Instruction>(V); });
    if (IsReplaceable && all_of(PHI->incoming_values(), [&](Value *V) {
          return isAvailable(cast<Instruction>(V));
        })) {
      UsersToReplace.insert(PHI);
      PushUsersToWorklist(PHI);
      ...
    }
    if (!IsReplaceable || !ValuesToRevisit.insert(PHI))
      ...
    for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {
      ...
    // Selects need both arms to be instructions that can be revisited.
    if (!TrueInst || !FalseInst)
      ...
    UsersToReplace.insert(SI);
    PushUsersToWorklist(SI);
    ...
    if (!TryPushInstOperand(TrueInst) || !TryPushInstOperand(FalseInst))
      ...
    UsersToReplace.insert(GEP);
    PushUsersToWorklist(GEP);
    ...
    if (!TryPushInstOperand(PtrOp))
      ...
    // Memory transfer intrinsics are fine unless volatile.
    if (MI->isVolatile())
      ...
    UsersToReplace.insert(Inst);
    } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
      UsersToReplace.insert(Inst);
      PushUsersToWorklist(Inst);
    ...
    // Any other kind of user defeats the transform.
    LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');
void PointerReplacer::replacePointer(Value *V) {
  ...
  SetVector<Instruction *> PostOrderWorklist;
  SmallPtrSet<Instruction *, 32> Visited;
  ...
  // Collect the users to replace in postorder, so that reversing the list
  // visits every definition before any of its users.
  while (!Worklist.empty()) {
    ...
    if (Visited.insert(I).second) {
      for (auto *U : I->users()) {
        ...
        if (UsersToReplace.contains(UserInst) && !Visited.contains(UserInst))
          ...
  ...
  for (Instruction *I : reverse(PostOrderWorklist))
    ...
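The ordering trick above, sketched standalone over the acyclic slice of users collected earlier: an iterative two-phase DFS emits nodes in postorder (final users first), and iterating that list in reverse yields a topological order in which every node is processed before the nodes that consume it. Node and reversePostOrder are hypothetical names.

#include <algorithm>
#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Users;
};

std::vector<Node *> reversePostOrder(Node *Root) {
  std::vector<Node *> Post, Stack{Root};
  std::set<Node *> Visited, Emitted;
  while (!Stack.empty()) {
    Node *N = Stack.back();
    if (Visited.insert(N).second) {
      // First touch: keep N on the stack and queue its unvisited users.
      for (Node *U : N->Users)
        if (!Visited.count(U))
          Stack.push_back(U);
    } else {
      // Second touch: all users are done, so emit N in postorder.
      Stack.pop_back();
      if (Emitted.insert(N).second)
        Post.push_back(N);
    }
  }
  std::reverse(Post.begin(), Post.end()); // definitions before their users
  return Post;
}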
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    ...

  // Loads: recreate the load against the replaced pointer operand.
  auto *V = getReplacement(LT->getPointerOperand());
  assert(V && "Operand not replaced");
  auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                            LT->getAlign(), LT->getOrdering(),
                            LT->getSyncScopeID());
  ...
  WorkMap[NewI] = NewI;

  // PHIs: mutate the type in place and swap in replaced incoming values.
  Value *V = WorkMap.lookup(PHI->getIncomingValue(0));
  PHI->mutateType(V ? V->getType() : PHI->getIncomingValue(0)->getType());
  for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {
    Value *V = WorkMap.lookup(PHI->getIncomingValue(I));
    PHI->setIncomingValue(I, V ? V : PHI->getIncomingValue(I));
  }

  // GEPs: rebuild with the replaced base pointer and the original indices.
  auto *V = getReplacement(GEP->getPointerOperand());
  assert(V && "Operand not replaced");
  SmallVector<Value *, 8> Indices(GEP->indices());
  ...
  NewI->setNoWrapFlags(GEP->getNoWrapFlags());

  // Selects: substitute whichever arms have replacements.
  Value *TrueValue = SI->getTrueValue();
  Value *FalseValue = SI->getFalseValue();
  if (Value *Replacement = getReplacement(TrueValue))
    TrueValue = Replacement;
  if (Value *Replacement = getReplacement(FalseValue))
    FalseValue = Replacement;
  ... SI->getName(), nullptr, SI);

  // Memory transfers: rebuild the intrinsic with replaced source/dest.
  auto *DestV = MemCpy->getRawDest();
  auto *SrcV = MemCpy->getRawSource();
  ...
  if (auto *DestReplace = getReplacement(DestV))
    ...
  if (auto *SrcReplace = getReplacement(SrcV))
    ...
  ... MemCpy->getIntrinsicID(), DestV, MemCpy->getDestAlign(), SrcV,
      MemCpy->getSourceAlign(), MemCpy->getLength(), MemCpy->isVolatile());
  ...
  NewI->setAAMetadata(AAMD);
  ...
  WorkMap[MemCpy] = NewI;

  // Address space casts: only a still-differing destination space needs a
  // fresh cast.
  auto *V = getReplacement(ASC->getPointerOperand());
  assert(V && "Operand not replaced");
  assert(isEqualOrValidAddrSpaceCast(
             ASC, V->getType()->getPointerAddressSpace()) &&
         "Invalid address space cast!");
  ...
  if (V->getType()->getPointerAddressSpace() !=
      ASC->getType()->getPointerAddressSpace()) {
    auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
// In visitAllocaInst(): move the alloca to the start of the entry block
// unless a zero-size alloca already sits there.
if (&*FirstInst != &AI) {
  ...
std::optional<TypeSize> EntryAISize = ...
if (!EntryAISize || !EntryAISize->isZero()) {
  ...

// If the alloca is only ever initialized by a copy from a constant global,
// replace all of its users with the global directly.
Value *TheSrc = Copy->getSource();
... TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
if (AllocaAlign <= SourceAlign && ...
  LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
  ...
  PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
  if (PtrReplacer.collectUsers()) {
    ...
    PtrReplacer.replacePointer(TheSrc);

// Body of isSupportedAtomicType():
return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
// Tail of the combineLoadToNewType() signature:
                                                const Twine &Suffix) {
  ... "can't fold an atomic load to requested type");

// In combineStoreToNewValue(): recreate the store and carry over only the
// metadata kinds that remain valid on the new value type.
  ... "can't fold an atomic store of requested type");
Value *Ptr = SI.getPointerOperand();
...
SI.getAllMetadata(MD);
...
for (const auto &MDPair : MD) {
  unsigned ID = MDPair.first;
  ...
  // These kinds transfer directly to the new store:
  case LLVMContext::MD_dbg:
  case LLVMContext::MD_DIAssignID:
  case LLVMContext::MD_tbaa:
  case LLVMContext::MD_prof:
  case LLVMContext::MD_fpmath:
  case LLVMContext::MD_tbaa_struct:
  case LLVMContext::MD_alias_scope:
  case LLVMContext::MD_noalias:
  case LLVMContext::MD_nontemporal:
  case LLVMContext::MD_mem_parallel_loop_access:
  case LLVMContext::MD_access_group:
  ...
  // These kinds only make sense on loads and are dropped for the store:
  case LLVMContext::MD_invariant_load:
  case LLVMContext::MD_nonnull:
  case LLVMContext::MD_noundef:
  case LLVMContext::MD_range:
  case LLVMContext::MD_align:
  case LLVMContext::MD_dereferenceable:
  case LLVMContext::MD_dereferenceable_or_null:
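The same enumerate-and-filter idiom as a free-standing sketch (assumes LLVM headers; the real switch above distinguishes many more kinds): copy across only metadata that stays valid on the rewritten access and conservatively drop value-specific kinds.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

static void copySafeMetadata(Instruction &From, Instruction &To) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  From.getAllMetadata(MD);
  for (const auto &[ID, N] : MD) {
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
      To.setMetadata(ID, N); // location-based kinds survive the rewrite
      break;
    default:
      break; // value-specific kinds (e.g. !range) are dropped conservatively
    }
  }
}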
// In combineLoadToOperationType(): bail on ordered/atomic loads, dead loads,
// and swiftError pointers before looking through the load's single user.
if (!Load.isUnordered())
  ...
if (Load.use_empty())
  ...
if (Load.getPointerOperand()->isSwiftError())
  ...
if (Load.hasOneUse()) {
  ...
Type *LoadTy = Load.getType();
...
if (BC->getType()->isX86_AMXTy())
  ...
Type *DestTy = CastUser->getDestTy();
// In unpackLoadToAggregate(): split a load of a struct or array into
// per-element loads plus insertvalues, unless padding gets in the way.
if (!T->isAggregateType())
  ...

// Struct case:
auto NumElements = ST->getNumElements();
if (NumElements == 1) {
  // A single-element struct collapses to a load of the element itself.
  ...
  NewLoad->copyMetadata(LI, LLVMContext::MD_invariant_load);
  ...
auto *SL = DL.getStructLayout(ST);
...
if (SL->hasPadding())
  ...
auto *IdxType = DL.getIndexType(Addr->getType());
...
for (unsigned i = 0; i < NumElements; i++) {
  ...
  ST->getElementType(i), Ptr,
  ...
  L->copyMetadata(LI, LLVMContext::MD_invariant_load);
  ...

// Array case:
auto *ET = AT->getElementType();
auto NumElements = AT->getNumElements();
if (NumElements == 1) {
  ...
auto *Zero = ConstantInt::get(IdxType, 0);
...
for (uint64_t i = 0; i < NumElements; i++) {
  Value *Indices[2] = {
      ...
      ConstantInt::get(IdxType, i),
  };
  ...
  EltAlign, Name + ".unpack");
// In isObjectSizeLessThanOrEq(): chase the pointer through casts and check
// every possible underlying object against MaxSize.
P = P->stripPointerCasts();
...
if (GA->isInterposable())
  ...
std::optional<TypeSize> AllocSize = AI->getAllocationSize(DL);
if (!AllocSize || AllocSize->isScalable() ||
    AllocSize->getFixedValue() > MaxSize)
  ...
if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
  ...
if (InitSize > MaxSize)
  ...
} while (!Worklist.empty());
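The alloca size check above, isolated: AllocaInst::getAllocationSize returns std::nullopt for variable-length allocas, and a scalable size cannot be compared against a fixed byte budget, so both cases fail conservatively. A sketch assuming LLVM headers, with a hypothetical allocaFitsIn helper:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include <cstdint>
#include <optional>

bool allocaFitsIn(const llvm::AllocaInst *AI, const llvm::DataLayout &DL,
                  uint64_t MaxSize) {
  std::optional<llvm::TypeSize> Size = AI->getAllocationSize(DL);
  return Size && !Size->isScalable() && Size->getFixedValue() <= MaxSize;
}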
// In canReplaceGEPIdxWithZero(): a variable GEP index can be rewritten to
// zero when any other value would take the access out of bounds of the
// underlying allocation.
Idx = FirstNZIdx(GEPI);
...
if (!AllocTy || !AllocTy->isSized())
  ...
uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
...
auto IsAllNonNegative = [&]() {
  for (unsigned i = Idx + 1, e = GEPI->getNumOperands(); i != e; ++i) {
    ...

// In replaceGEPIdxWithZero():
ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));

// In canSimplifyNullStoreOrGEP():
auto *Ptr = SI.getPointerOperand();
...
Ptr = GEPI->getOperand(0);

// In canSimplifyNullLoadOrGEP():
const Value *GEPI0 = GEPI->getOperand(0);
Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
                                                bool HasDereferenceable,
                                                ...) {
  // A select with a null arm must produce its other arm here, because the
  // operand is known non-null at this use.
  return Sel->getOperand(2);
  ...
  return Sel->getOperand(1);
  ...
  if (!V->hasOneUse())
    ...
  // Look through a GEP whose base must likewise be non-null (inbounds, or a
  // use that carries dereferenceability).
  if (HasDereferenceable || GEP->isInBounds()) {
    if (auto *Res = simplifyNonNullOperand(GEP->getPointerOperand(),
                                           HasDereferenceable, Depth + 1)) {
      replaceOperand(*GEP, 0, Res);
      ...
  // Recurse into each incoming value of a single-use PHI.
  for (Use &U : PHI->incoming_values()) {
    ...
    if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable, ...
// In visitLoadInst():
bool IsLoadCSE = false;
...
// load (select (cond, p, q)) -> select (cond, load p, load q), provided both
// pointers are safe to load from unconditionally.
if (Op->hasOneUse()) {
  ...
  Alignment, DL, SI) &&
  ...
  Alignment, DL, SI)) {
  auto MaybeCastedLoadOperand = [&](Value *Op) {
    ...
    Op->getName() + ".cast");
    ...
  };
  Value *LoadOp1 = MaybeCastedLoadOperand(SI->getOperand(1));
  ...
  Value *LoadOp2 = MaybeCastedLoadOperand(SI->getOperand(2));
  ...
if (Value *V = simplifyNonNullOperand(Op, /*HasDereferenceable=*/true))
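The select transform above, in C terms: a load of a select becomes a select of two loads once both pointers are known safe to dereference.

int loadOfSelect(int c, int *p, int *q) {
  return *(c ? p : q); // rewritten as: c ? *p : *q
}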
// In likeBitCastFromVector(): recognize an insertvalue-of-extractelement
// chain that lays vector lanes down as same-sized aggregate fields.
auto *W = E->getVectorOperand();
...
if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
  ...
V = IV->getAggregateOperand();
...
auto *VT = V->getType();
...
if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
  ...
for (const auto *EltT : ST->elements()) {
  if (EltT != UT->getElementType())
    ...
// In combineStoreToValueType():
if (!SI.isUnordered())
  ...
if (SI.getPointerOperand()->isSwiftError())
  ...
Value *V = SI.getValueOperand();
...
// Peel a bitcast off the stored value; x86_amx is excluded because its
// loads and stores must not be rewritten.
assert(!BC->getType()->isX86_AMXTy() &&
       "store to x86_amx* should not happen!");
V = BC->getOperand(0);
...
if (V->getType()->isX86_AMXTy())
  ...
// In unpackStoreToAggregate(): the store-side mirror of
// unpackLoadToAggregate(), splitting an aggregate store into per-element
// stores.
Value *V = SI.getValueOperand();
Type *T = V->getType();
...
if (!T->isAggregateType())
  ...

// Struct case:
unsigned Count = ST->getNumElements();
...
auto *SL = DL.getStructLayout(ST);
...
if (SL->hasPadding())
  ...
const auto Align = SI.getAlign();
...
auto *Addr = SI.getPointerOperand();
...
AddrName += ".repack";
...
auto *IdxType = DL.getIndexType(Addr->getType());
for (unsigned i = 0; i < Count; i++) {
  ...

// Array case:
auto NumElements = AT->getNumElements();
if (NumElements == 1) {
  ...
TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
const auto Align = SI.getAlign();
...
auto *Addr = SI.getPointerOperand();
...
AddrName += ".repack";
...
auto *Zero = ConstantInt::get(IdxType, 0);
...
for (uint64_t i = 0; i < NumElements; i++) {
  Value *Indices[2] = {
      ...
      ConstantInt::get(IdxType, i),
// Body of equivalentAddressValues(): identical pointers obviously hold the
// same address.
if (A == B)
  return true;
// In visitStoreInst():
Value *Val = SI.getOperand(0);
Value *Ptr = SI.getOperand(1);
...
if (!SI.isUnordered())
  return nullptr;
...
if (GEP->getOperand(0)->hasOneUse())
  ...
// Scan a few instructions back for an earlier store to the same address that
// this one makes dead, skipping over debug and pseudo instructions.
for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
     ...) {
  if (BBI->isDebugOrPseudoInst()) {
    ...
  if (PrevSI->isUnordered() &&
      ...
      PrevSI->getValueOperand()->getType() ==
          SI.getValueOperand()->getType()) {
    ...
assert(SI.isUnordered() && "can't eliminate ordering operation");
...
if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
  ...
if (Value *V = simplifyNonNullOperand(Ptr, /*HasDereferenceable=*/true))
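The backward scan above is a tiny local dead-store elimination; in C terms:

void deadStore(int *p, int a, int b) {
  *p = a; // dead: overwritten below, with nothing in between that could
          // read, write, or unwind
  *p = b;
}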
// In mergeStoreIntoSuccessor(): try to sink two stores that feed the same
// successor block into a single store of a merged value.
if (!SI.isUnordered())
  ...
if (*PredIter == StoreBB)
  ...
if (StoreBB == DestBB || OtherBB == DestBB)
  ...
if (!OtherBr || BBI == OtherBB->begin())
  ...
// Mergeable means: same address, same value type, and the same special
// state (volatility, ordering, alignment).
auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
  ...
  OtherStore->getPointerOperand() != SI.getPointerOperand())
  ...
  auto *SIVTy = SI.getValueOperand()->getType();
  auto *OSVTy = OtherStore->getValueOperand()->getType();
  ...
  SI.hasSameSpecialState(OtherStore);
};
...
while (BBI->isDebugOrPseudoInst()) {
  if (BBI == OtherBB->begin())
    ...
if (!OtherStoreIsMergeable(OtherStore))
  ...
if (OtherStoreIsMergeable(OtherStore))
  ...
if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
    BBI->mayWriteToMemory() || BBI == OtherBB->begin())
  ...
if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
  ...
if (MergedVal != SI.getValueOperand()) {
  ...
Builder.SetInsertPoint(OtherStore);
...
new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
              SI.getOrdering(), SI.getSyncScopeID());
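Per the function's own doc comment, the target pattern is "if () { *P = v1; } else { *P = v2 }", collapsed so the store happens once. In C terms:

void mergeStores(int c, int *p, int v1, int v2) {
  if (c)
    *p = v1;
  else
    *p = v2;
  // After the transform, a single store of a merged value:
  //   *p = c ? v1 : v2;
}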
Functions defined in this file:

static void addToWorklist(Instruction &I, SmallVector<Instruction *, 4> &Worklist)
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC, GetElementPtrInst *GEPI, Instruction *MemI, unsigned &Idx)
static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op)
static bool canSimplifyNullStoreOrGEP(StoreInst &SI)
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC, LoadInst &Load)
    Combine loads to match the type of their uses' value after looking through intervening bitcasts.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI, Value *V)
    Combine a store to a new type.
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI)
    Combine stores to match the type of the value being stored.
static bool equivalentAddressValues(Value *A, Value *B)
    Test whether A and B will obviously have the same value.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI, const DataLayout &DL)
    Returns true if V is dereferenceable for the size of the alloca.
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize, const DataLayout &DL)
static bool isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V, MemTransferInst *&TheCopy, SmallVectorImpl<Instruction *> &ToDelete)
    Recursively walk the uses of a (derived) pointer to an alloca.
static bool isSupportedAtomicType(Type *Ty)
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V)
    Look for an extractelement/insertvalue sequence that acts like a bitcast.
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr, Instruction &MemI)
static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC, AllocaInst &AI, DominatorTree &DT)
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI)
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI)