using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(300),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);
/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset), and remember the
/// memcpy instruction in TheCopy.
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
                               MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
  SmallVector<ValueAndIsOffset, 32> Worklist;
  SmallPtrSet<ValueAndIsOffset, 32> Visited;
  Worklist.emplace_back(V, false);

  while (!Worklist.empty()) {
    ValueAndIsOffset Elem = Worklist.pop_back_val();
    if (!Visited.insert(Elem).second)
      continue;
    if (Visited.size() > MaxCopiedFromConstantUsers)
      return false;

    const auto [Value, IsOffset] = Elem;
    for (auto &U : Value->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Non-volatile loads are always OK; anything else is rejected.
        if (!LI->isSimple())
          return false;
        continue;
      }

      if (isa<PHINode, SelectInst>(I)) {
        // Conservatively treat phi/select results as offset pointers.
        Worklist.emplace_back(I, true);
        continue;
      }
      if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
        Worklist.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        Worklist.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // A use as the callee is treated like a load of the pointer.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // Read-only, non-capturing uses are just loads.
        bool NoCapture = Call->doesNotCapture(DataOpNo);
        if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
            (Call->onlyReadsMemory(DataOpNo) && NoCapture))
          continue;

        // A byval argument makes a copy, so it only reads the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime markers can simply be deleted by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // Otherwise only a single non-volatile memcpy/memmove writing the
      // whole, unoffset alloca is acceptable.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;
      if (MI->isVolatile())
        return false;
      if (U.getOperandNo() == 1)
        continue; // The alloca is only the source: just a read.
      if (TheCopy)
        return false; // A second copy is not handled.
      if (IsOffset)
        return false;
      if (U.getOperandNo() != 0)
        return false;
      TheCopy = MI;
    }
  }
  return true;
}
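// What the walk above enables (illustrative IR, not from this file): an
// alloca whose only write is a memcpy from constant memory can be replaced
// by the copy source, deleting the alloca and the copy:
//   %buf = alloca [16 x i8]
//   call void @llvm.memcpy.p0.p0.i64(ptr %buf, ptr @const_table, i64 16, i1 false)
//   %v = load i8, ptr %buf
// ==>
//   %v = load i8, ptr @const_table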
static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
                                            AllocaInst &AI, DominatorTree &DT) {
  // ...
  if (C->getValue().getActiveBits() <= 64) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder.CreateAlloca(NewTy, AI.getAddressSpace(),
                                              nullptr, AI.getName());
    New->setAlignment(AI.getAlign());
    replaceAllDbgUsesWith(AI, *New, *New, DT);

    // Scan past the block of allocas and any interleaved debug intrinsics so
    // the GEP is inserted after all allocation instructions.
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP = GetElementPtrInst::CreateInBounds(
        NewTy, New, Idx, New->getName() + ".sub");
    // ...
  }
  // ...
}
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC, Instruction &Root)
      : IC(IC), Root(Root) {}

  bool collectUsers();
  void replacePointer(Value *V);

private:
  bool collectUsersRecursive(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *V);
  bool isAvailable(Instruction *I) const {
    return I == &Root || Worklist.contains(I);
  }

  SmallPtrSet<Instruction *, 32> ValuesToRevisit;
  SmallSetVector<Instruction *, 4> Worklist;
  MapVector<Value *, Value *> WorkMap;
  InstCombinerImpl &IC;
  Instruction &Root;
};

bool PointerReplacer::collectUsers() {
  if (!collectUsersRecursive(Root))
    return false;

  // Ensure that all outstanding (indirect) users of the root are inserted
  // into the Worklist; return false otherwise.
  for (auto *Inst : ValuesToRevisit)
    if (!Worklist.contains(Inst))
      return false;
  return true;
}

bool PointerReplacer::collectUsersRecursive(Instruction &I) {
  for (auto *U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
      // All incoming values must be instructions for replacability.
      if (any_of(PHI->incoming_values(),
                 [](Value *V) { return !isa<Instruction>(V); }))
        return false;

      // If at least one incoming value of the PHI is not in the Worklist,
      // store the PHI for revisiting and skip this iteration of the loop.
      if (any_of(PHI->incoming_values(), [this](Value *V) {
            return !isAvailable(cast<Instruction>(V));
          })) {
        ValuesToRevisit.insert(Inst);
        continue;
      }

      Worklist.insert(PHI);
      if (!collectUsersRecursive(*PHI))
        return false;
    } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
      if (!isa<Instruction>(SI->getTrueValue()) ||
          !isa<Instruction>(SI->getFalseValue()))
        return false;

      if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
          !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
        ValuesToRevisit.insert(Inst);
        continue;
      }
      Worklist.insert(SI);
      if (!collectUsersRecursive(*SI))
        return false;
    } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsersRecursive(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }

  return true;
}
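// Note on the two-phase scheme above: a phi or select can be reached before
// all of its incoming pointers have been collected, so it is parked in
// ValuesToRevisit; collectUsers() only succeeds once every parked
// instruction has also made it into the Worklist.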
Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    NewI->takeName(LT);
    copyMetadataForLoad(*NewI, *LT);

    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *PHI = dyn_cast<PHINode>(I)) {
    Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
    auto *NewPHI = PHINode::Create(NewTy, PHI->getNumIncomingValues(),
                                   PHI->getName(), PHI);
    for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
      NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
                          PHI->getIncomingBlock(I));
    WorkMap[PHI] = NewPHI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI =
        GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::getWithSamePointeeType(
        cast<PointerType>(BC->getType()),
        V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else if (auto *SI = dyn_cast<SelectInst>(I)) {
    auto *NewSI = SelectInst::Create(
        SI->getCondition(), getReplacement(SI->getTrueValue()),
        getReplacement(SI->getFalseValue()), SI->getName(), nullptr, SI);
    IC.InsertNewInstWith(NewSI, *SI);
    NewSI->takeName(SI);
    WorkMap[SI] = NewSI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *SrcV = getReplacement(MemCpy->getRawSource());
    // The pointer may appear in the destination of a copy, but we don't want
    // to replace it.
    if (!SrcV) {
      assert(getReplacement(MemCpy->getRawDest()) &&
             "destination not in replace list");
      return;
    }

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
        SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
        MemCpy->isVolatile());
    AAMDNodes AAMD = MemCpy->getAAMetadata();
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}
void PointerReplacer::replacePointer(Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(Root.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->hasSameElementTypeAs(NT) && "Invalid usage");
#endif
  WorkMap[&Root] = V;

  for (Instruction *Workitem : Worklist)
    replace(Workitem);
}
Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
  // ...
  if (FirstInst != &AI) {
    // If the entry block doesn't start with a zero-size alloca then move
    // this one to the start of the entry block. There is no problem with
    // dominance as the array size was forced to a constant earlier already.
    AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
    // ...
  }
  // ...
  // Check to see if this allocation is only modified by a memcpy/memmove
  // from a memory location whose alignment is equal to or exceeds that of
  // the allocation.
  if (isOnlyCopiedFromConstantMemory(AA, &AI, Copy, ToDelete)) {
    Value *TheSrc = Copy->getSource();
    Align AllocaAlign = AI.getAlign();
    Align SourceAlign =
        getOrEnforceKnownAlignment(TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
    if (AllocaAlign <= SourceAlign &&
        isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
        !isa<Instruction>(TheSrc)) {
      LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      // ...
      PointerReplacer PtrReplacer(*this, AI);
      if (PtrReplacer.collectUsers()) {
        for (Instruction *Delete : ToDelete)
          eraseInstFromFunction(*Delete);

        Value *Cast = Builder.CreateBitCast(TheSrc, DestTy);
        PtrReplacer.replacePointer(Cast);
        ++NumGlobalCopies;
      }
    }
  }
  // ...
}
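// Note the guard on !isa<Instruction>(TheSrc): the copy source must be a
// constant, global or argument so that every existing use of the alloca is
// trivially dominated by the replacement; sinking an instruction source
// could break dominance.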
/// Helper to combine a load to a new type.
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Type *NewPtrTy = NewTy->getPointerTo(AS);
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType() == NewPtrTy))
    NewPtr = Builder.CreateBitCast(Ptr, NewPtrTy);

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}
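// Typical effect (illustrative, typed-pointer IR; the bitcast juggling above
// exists precisely because the pointer type must match NewTy):
//   %p = bitcast float* %q to i32*
//   %v = load i32, i32* %p
// can be reloaded as
//   %v.f = load float, float* %q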
/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // This routine clones a store changing only its type, so essentially all
    // metadata should be preserved; we switch over known kinds to stay
    // conservatively correct.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_DIAssignID:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_noundef:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}
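// The two case groups above are deliberate: the first annotates the store
// itself and survives a change of stored type, while the second (nonnull,
// range, align, dereferenceable, ...) describes a loaded value and is
// meaningless on a store, so it is intentionally dropped.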
/// Returns true if instruction represent minmax pattern like:
///   select ((cmp load V1, load V2), load V1, load V2).
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // ...
}
/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &Load) {
  if (!Load.isUnordered())
    return nullptr;

  if (Load.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (Load.getPointerOperand()->isSwiftError())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type,
  // but not for pointer<->integer casts, which would be type punning.
  if (Load.hasOneUse()) {
    Type *LoadTy = Load.getType();
    if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
      // Don't transform when the type is x86_amx; it keeps the pass that
      // lowers x86_amx happy.
      if (BC->getType()->isX86_AMXTy())
        return nullptr;
    }

    if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
      Type *DestTy = CastUser->getDestTy();
      // ...
    }
  }
  // ...
  return nullptr;
}
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack it directly.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      // ...
    }

    // Don't unpack structs with padding: we'd lose the knowledge that
    // padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    const auto Align = LI.getAlign();
    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = PoisonValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, ArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      L->setAAMetadata(LI.getAAMetadata());
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    // ...
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      // ... per-element load + insertvalue, as in the struct case, with a
      // running byte offset used for the element alignment.
    }
    // ...
  }
  return nullptr;
}
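// Effect on IR (illustrative): a load of a padding-free two-field struct is
// rewritten as per-element loads reassembled with insertvalue:
//   %s = load {i32, i32}, ptr %p
// ==>
//   %e0p = getelementptr inbounds {i32, i32}, ptr %p, i32 0, i32 0
//   %e0  = load i32, ptr %e0p
//   %e1p = getelementptr inbounds {i32, i32}, ptr %p, i32 0, i32 1
//   %e1  = load i32, ptr %e1p
//   %s0  = insertvalue {i32, i32} poison, i32 %e0, 0
//   %s   = insertvalue {i32, i32} %s0, i32 %e1, 1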
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    // Look through selects, phis and non-interposable aliases.
    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }
    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      append_range(Worklist, PN->incoming_values());
      continue;
    }
    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
      if (TS.isScalable())
        return false;
      // ...
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
                                     GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  // ...
  // FirstNZIdx scans the GEP's indices for the first one that is not a
  // constant zero:
  //     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
  //       if (CI->isZero())
  //         continue;
  Idx = FirstNZIdx(GEPI);
  // ...
  // Size information about scalable vectors is not available, so we cannot
  // deduce whether indexing at 0 is OK.
  if (isa<ScalableVectorType>(SourceElementType))
    return false;

  Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();

  // If there are indices after the one we might replace with a zero, make
  // sure they're all known non-negative.
  auto IsAllNonNegative = [&]() {
    // ...
  };
  // ...
}

template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
                                          T &MemI) {
  // Clone the GEP with the non-constant index replaced by zero and make the
  // memory access use the new GEP.
  // ...
  MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
  // ...
}
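// Rationale (informal sketch): when the object reachable through the leading
// indices is exactly one stride wide, any in-bounds value of the variable
// index must be zero, so the index can be replaced by the constant 0 and the
// GEP becomes amenable to further folding.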
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(),
                                SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(),
                              GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);
  if (Value *Res = simplifyLoadInst(&LI, Op, SQ.getWithInstruction(&LI)))
    return replaceInstUsesWith(LI, Res);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  Align KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI))
    return replaceOperand(LI, 0, NewGEPI);

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(&LI, *AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered())
    return nullptr;

  // load(gep null, ...) and load null/undef are unreachable; mark that by
  // storing poison to null (we cannot modify the CFG here).
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    StoreInst *SI = new StoreInst(PoisonValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses:
    // this helps alias analysis and exposes redundancy.  Note that the
    // introduced loads must be known not to trap.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
  return nullptr;
}
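// The select fold above, in IR terms (illustrative):
//   %p = select i1 %c, ptr %a, ptr %b
//   %v = load i32, ptr %p
// ==>  (only when both %a and %b are known safe to load from)
//   %va = load i32, ptr %a
//   %vb = load i32, ptr %b
//   %v  = select i1 %c, i32 %va, i32 %vb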
/// Look for extractelement/insertvalue sequences that act like a bitcast.
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!match(V, m_Undef()) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}
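// Pattern recognized above (illustrative IR): an aggregate built purely from
// lanes of one vector, in lane order, acts like a bitcast of that vector:
//   %e0 = extractelement <2 x float> %v, i32 0
//   %e1 = extractelement <2 x float> %v, i32 1
//   %a0 = insertvalue [2 x float] undef, float %e0, 0
//   %a1 = insertvalue [2 x float] %a0, float %e1, 1
// Storing %a1 can then be replaced by storing %v directly.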
/// Combine stores to match the type of value being stored.
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
    // Don't transform when the type is x86_amx; it keeps the pass that
    // lowers x86_amx happy.
    if (V->getType()->isX86_AMXTy())
      return false;
    // ...
  }
  // ...
  return false;
}
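// In IR (illustrative):
//   %b = bitcast float %x to i32
//   store i32 %b, ptr %p
// ==>
//   store float %x, ptr %p   ; pointer operand re-cast as needed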
static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Don't break stores with padding: we'd lose the knowledge that padding
    // exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      auto *Ptr =
          IC.Builder.CreateInBoundsGEP(ST, Addr, ArrayRef(Indices), AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      NS->setAAMetadata(SI.getAAMetadata());
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Same element-by-element rewrite as the struct case, using a running
    // byte offset (Offset += EltSize) to compute each element's alignment.
    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";
    // ...
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
          Zero,
          ConstantInt::get(IdxType, i),
      };
      // ...
      Offset += EltSize;
    }

    return true;
  }

  return false;
}
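// This is the store-side mirror of unpackLoadToAggregate(): one scalar store
// per element, each with alignment commonAlignment(Align, element offset),
// and AA metadata copied from the original aggregate store.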
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr @a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr @a, 0, 3
///   %t2 = load i32* %t1
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions; this
  // uses isIdenticalToWhenDefined because the comparison is only ever
  // between two uses within the same basic block.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where the select is a min/max pattern.
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombinerImpl &IC,
                                                StoreInst &SI) {
  Value *StoreAddr, *LoadAddr;
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value(StoreAddr))))
    return false;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure the type would actually change; this condition can be hit
  // with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  // ...
  return true;
}
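// Net effect, per the pattern this helper targets: the bitcasts around the
// min/max select are stripped so the load, select and store all operate on
// the original type, keeping the min/max idiom recognizable to later passes.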
Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI))
    return replaceOperand(SI, 1, NewGEPI);

  // Don't hack volatile/ordered stores.
  if (!SI.isUnordered())
    return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, and skip pointer-to-pointer
    // bitcasts, which are NOPs.
    if (BBI->isDebugOrPseudoInst() ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
          PrevSI->getValueOperand()->getType() ==
              SI.getValueOperand()->getType()) {
        ++NumDeadStore;
        // ...
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is
    // from the pointer we're storing to, then *this* store is dead
    // (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }
      break;
    }

    // Don't skip over anything that can modify memory or throw.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null and store X, GEP(null, Y) become 'unreachable' in
  // SimplifyCFG; canonicalize the stored value to poison first.
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<PoisonValue>(Val))
      return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  return nullptr;
}
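// The six-instruction backward scan above is a micro-DSE (illustrative):
//   store i32 1, ptr %p      ; erased: overwritten below, with nothing in
//   %x = add i32 %a, %b      ; between that reads or writes memory
//   store i32 %x, ptr %p
// The scan deliberately stops at anything that may read or write memory or
// throw.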
/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false; // This code has not been audited for volatile/ordered case.

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  BasicBlock *OtherBB = *PredIter;
  if (*PredIter == StoreBB)
    OtherBB = *++PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if
  // then else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info and pointer-to-pointer bitcasts.
    while (BBI->isDebugOrPseudoInst() ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, or isn't a store to the same location, or isn't
    // the right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch: the
    // if/then triangle.  Look for a store to the same pointer in OtherBB.
    // ...
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, make sure nothing reads or
    // overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(DILocation::getMergedLocation(
      SI.getDebugLoc(), OtherStore->getDebugLoc()));

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags = SI.getAAMetadata();
  if (AATags)
    NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}
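// The merged form, in IR terms (illustrative):
//   then:  br label %join            ; store of %v1 removed
//   else:  br label %join            ; store of %v2 removed
//   join:  %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
//          store i32 %storemerge, ptr %P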