using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(300),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);

cl::opt<bool> EnableInferAlignmentPass(
    "enable-infer-alignment-pass", cl::init(true), cl::Hidden, cl::ZeroOrMore,
    cl::desc("Enable the InferAlignment pass, disabling alignment inference in "
             "InstCombine"));
// isOnlyCopiedFromConstantMemory: recursively walk the uses of a (derived)
// pointer to an alloca, accepting only reads and a single memcpy/memmove from
// constant memory, which is recorded in TheCopy.
  while (!Worklist.empty()) {
    if (!Visited.insert(Elem).second)
      continue;

    const auto [Value, IsOffset] = Elem;
    for (auto &U : Value->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Non-simple (volatile or atomic) loads are rejected.
        if (!LI->isSimple())
          return false;
      }

      if (isa<PHINode, SelectInst>(I)) {
        // ...
      }
      if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
        // ...
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // ...
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // A use as the callee is treated like a load of the pointer.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // A read-only call that does not capture the pointer is just a load.
        bool NoCapture = Call->doesNotCapture(DataOpNo);
        if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
            (Call->onlyReadsMemory(DataOpNo) && NoCapture))
          continue;

        // A byval argument makes a copy in the callee, so it is only a read.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime markers can simply be removed by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        // ...
      }

      // Only a single non-volatile memcpy/memmove into the alloca is allowed.
      if (MI->isVolatile())
        return false;

      // Using the alloca as the copy source is just a read.
      if (U.getOperandNo() == 1)
        continue;

      if (TheCopy)
        return false;

      if (IsOffset)
        return false;

      // The alloca must be the destination operand of the transfer.
      if (U.getOperandNo() != 0)
        return false;
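// Illustrative sketch (not from the original source) of the pattern the walk
// above is meant to certify, with made-up names: if the alloca's only writer
// is a memcpy from a constant global and everything else only reads it,
//
//   %buf = alloca [16 x i8]
//   call void @llvm.memcpy.p0.p0.i64(ptr %buf, ptr @const_table, i64 16, i1 false)
//   %v = load i8, ptr %buf            ; only reads of %buf from here on
//
// then the reads can use @const_table directly and the memcpy and alloca
// become dead (counted by NumGlobalCopies above).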
  // (simplifyAllocaArraySize) Rewrite "alloca T, N" with a constant N as an
  // alloca of an array type plus a GEP to its first element.
  if (C->getValue().getActiveBits() <= 64) {
    // ...
    // Scan past existing allocas and debug intrinsics before inserting the
    // replacement GEP.
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;
    // ...
    auto *GEP = GetElementPtrInst::CreateInBounds(
        NewTy, New, Idx, New->getName() + ".sub");
// PointerReplacer: rewrites all users of a root pointer (here, an alloca) so
// that they operate on a replacement pointer, possibly in another address
// space.
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
      : IC(IC), Root(Root), FromAS(SrcAS) {}

  bool collectUsers();
  void replacePointer(Value *V);

private:
  bool isAvailable(Instruction *I) const {
    return I == &Root || Worklist.contains(I);
  }

  bool isEqualOrValidAddrSpaceCast(const Instruction *I,
                                   unsigned FromAS) const {
    const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
    if (!ASC)
      return false;
    unsigned ToAS = ASC->getDestAddressSpace();
    return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
  }
  // ...
};
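// Usage sketch, taken from the copy-from-constant-global code in
// visitAllocaInst later in this file: when the alloca is a copy of constant
// memory in another address space, a PointerReplacer walks the alloca's users
// and, if it can handle them all, rewrites them to use the source directly:
//
//   PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
//   if (PtrReplacer.collectUsers())
//     PtrReplacer.replacePointer(TheSrc);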
bool PointerReplacer::collectUsers() {
  if (!collectUsersRecursive(Root))
    return false;

  // Values deferred for revisiting (phi/select operands seen before their
  // definitions) must all have ended up in the worklist by now.
  for (auto *Inst : ValuesToRevisit)
    if (!Worklist.contains(Inst))
      return false;
  return true;
}
bool PointerReplacer::collectUsersRecursive(Instruction &I) {
  for (auto *U : I.users()) {
    auto *Inst = cast<Instruction>(&*U);
    if (auto *Load = dyn_cast<LoadInst>(Inst)) {
      if (Load->isVolatile())
        return false;
      Worklist.insert(Load);
    } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
      // All incoming values must be instructions.
      if (any_of(PHI->incoming_values(),
                 [](Value *V) { return !isa<Instruction>(V); }))
        return false;

      // Defer the PHI if any incoming value has not been handled yet.
      if (any_of(PHI->incoming_values(), [this](Value *V) {
            return !isAvailable(cast<Instruction>(V));
          })) {
        ValuesToRevisit.insert(Inst);
        continue;
      }

      Worklist.insert(PHI);
      if (!collectUsersRecursive(*PHI))
        return false;
    } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
      if (!isa<Instruction>(SI->getTrueValue()) ||
          !isa<Instruction>(SI->getFalseValue()))
        return false;

      // Defer the select if either operand has not been handled yet.
      if (!isAvailable(cast<Instruction>(SI->getTrueValue())) ||
          !isAvailable(cast<Instruction>(SI->getFalseValue()))) {
        ValuesToRevisit.insert(Inst);
        continue;
      }
      Worklist.insert(SI);
      if (!collectUsersRecursive(*SI))
        return false;
    } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
      Worklist.insert(Inst);
      if (!collectUsersRecursive(*Inst))
        return false;
    } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
      if (MI->isVolatile())
        return false;
      Worklist.insert(Inst);
    } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
      Worklist.insert(Inst);
    } else if (Inst->isLifetimeStartOrEnd()) {
      continue;
    } else {
      LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *U << '\n');
      return false;
    }
  }

  return true;
}
Value *PointerReplacer::getReplacement(Value *V) { return WorkMap.lookup(V); }
void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
                              LT->getAlign(), LT->getOrdering(),
                              LT->getSyncScopeID());
    // ...
    IC.InsertNewInstWith(NewI, LT->getIterator());
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *PHI = dyn_cast<PHINode>(I)) {
    Type *NewTy = getReplacement(PHI->getIncomingValue(0))->getType();
    // ... (NewPHI is created with type NewTy) ...
    for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I)
      NewPHI->addIncoming(getReplacement(PHI->getIncomingValue(I)),
                          PHI->getIncomingBlock(I));
    WorkMap[PHI] = NewPHI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    // ... (NewI is a GEP on V with the same source element type and indices)
    IC.InsertNewInstWith(NewI, GEP->getIterator());
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getContext(),
                                  V->getType()->getPointerAddressSpace());
    // ... (NewI is a bitcast of V to NewT)
    IC.InsertNewInstWith(NewI, BC->getIterator());
    WorkMap[BC] = NewI;
  } else if (auto *SI = dyn_cast<SelectInst>(I)) {
    auto *NewSI = SelectInst::Create(
        SI->getCondition(), getReplacement(SI->getTrueValue()),
        getReplacement(SI->getFalseValue()), SI->getName(), nullptr, SI);
    IC.InsertNewInstWith(NewSI, SI->getIterator());
    WorkMap[SI] = NewSI;
  } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
    auto *SrcV = getReplacement(MemCpy->getRawSource());
    // The pointer may only appear as the destination if it was not replaced.
    if (!SrcV) {
      assert(getReplacement(MemCpy->getRawDest()) &&
             "destination not in replace list");
      return;
    }

    IC.Builder.SetInsertPoint(MemCpy);
    auto *NewI = IC.Builder.CreateMemTransferInst(
        MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
        SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
        MemCpy->isVolatile());
    AAMDNodes AAMD = MemCpy->getAAMetadata();
    if (AAMD)
      NewI->setAAMetadata(AAMD);

    IC.eraseInstFromFunction(*MemCpy);
    WorkMap[MemCpy] = NewI;
  } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
    auto *V = getReplacement(ASC->getPointerOperand());
    assert(V && "Operand not replaced");
    assert(isEqualOrValidAddrSpaceCast(
               ASC, V->getType()->getPointerAddressSpace()) &&
           "Invalid address space cast!");

    Value *NewV = V;
    if (V->getType()->getPointerAddressSpace() !=
        ASC->getType()->getPointerAddressSpace()) {
      auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
      IC.InsertNewInstWith(NewI, ASC->getIterator());
      NewV = NewI;
    }
    IC.replaceInstUsesWith(*ASC, NewV);
    IC.eraseInstFromFunction(*ASC);
  }
}
void PointerReplacer::replacePointer(Value *V) {
  auto *PT = cast<PointerType>(Root.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && "Invalid usage");
  // Seed the replacement map with Root -> V, then run replace() over every
  // instruction gathered by collectUsers().
  // ...
}
  // (visitAllocaInst) If this is a zero-size alloca, try to merge it with an
  // alloca already sitting at the top of the entry block.
  if (FirstInst != &AI) {
    AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
    // ...
  // If the alloca is only ever copied from constant memory (TheCopy found by
  // isOnlyCopiedFromConstantMemory), replace all of its users with the copy
  // source.
    Value *TheSrc = Copy->getSource();
    Align SourceAlign = getOrEnforceKnownAlignment(
        TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
    if (AllocaAlign <= SourceAlign &&
        isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
        !isa<Instruction>(TheSrc)) {
      LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      // ...
      PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
      if (PtrReplacer.collectUsers()) {
        // ...
        PtrReplacer.replacePointer(TheSrc);
        ++NumGlobalCopies;
      }
    }
// combineLoadToNewType: helper to rebuild a load at a new type, preserving
// volatility, alignment, ordering and metadata.
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");
  // ...
  LoadInst *NewLoad =
      Builder.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
                                LI.isVolatile(), LI.getName() + Suffix);
  // ...
}
599 "can't fold an atomic store of requested type");
601 Value *
Ptr = SI.getPointerOperand();
603 SI.getAllMetadata(MD);
607 NewStore->
setAtomic(SI.getOrdering(), SI.getSyncScopeID());
608 for (
const auto &MDPair : MD) {
609 unsigned ID = MDPair.first;
620 case LLVMContext::MD_dbg:
621 case LLVMContext::MD_DIAssignID:
622 case LLVMContext::MD_tbaa:
623 case LLVMContext::MD_prof:
624 case LLVMContext::MD_fpmath:
625 case LLVMContext::MD_tbaa_struct:
626 case LLVMContext::MD_alias_scope:
627 case LLVMContext::MD_noalias:
628 case LLVMContext::MD_nontemporal:
629 case LLVMContext::MD_mem_parallel_loop_access:
630 case LLVMContext::MD_access_group:
634 case LLVMContext::MD_invariant_load:
635 case LLVMContext::MD_nonnull:
636 case LLVMContext::MD_noundef:
637 case LLVMContext::MD_range:
638 case LLVMContext::MD_align:
639 case LLVMContext::MD_dereferenceable:
640 case LLVMContext::MD_dereferenceable_or_null:
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
// combineLoadToOperationType: combine loads to match the type of their uses'
// value after looking through intervening bitcasts.
  if (!Load.isUnordered())
    return nullptr;
  if (Load.use_empty())
    return nullptr;
  // swifterror values can't be bitcasted.
  if (Load.getPointerOperand()->isSwiftError())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type,
  // as long as the load has a single user.
  if (Load.hasOneUse()) {
    Type *LoadTy = Load.getType();
    if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
      // x86 AMX loads cannot be rewritten this way.
      if (BC->getType()->isX86_AMXTy())
        return nullptr;
      // ...
    }
    if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
      Type *DestTy = CastUser->getDestTy();
      // ...
    }
  }
// unpackLoadToAggregate: split a load of a padding-free struct or array into
// per-element loads combined with insertvalue.
  if (!T->isAggregateType())
    return nullptr;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // A single-element struct is simply loaded as its only element.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    auto *SL = DL.getStructLayout(ST);
    // Structs with scalable size or internal padding are not unpacked.
    if (SL->getSizeInBits().isScalable())
      return nullptr;
    if (SL->hasPadding())
      return nullptr;
    // ...
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        // ...
      };
      // Per-element GEP and load:
      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
                                             EltAlign, Name + ".unpack");
      // ...
    }
    // ...
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        // ...
      };
      auto *L = IC.Builder.CreateAlignedLoad(ET, Ptr,
                                             EltAlign, Name + ".unpack");
      // ...
    }
  }
// isObjectSizeLessThanOrEq: conservatively prove that every object V can point
// to occupies at most MaxSize bytes.
  do {
    // ...
    P = P->stripPointerCasts();
    // ...
    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      // ...
    }
    // Interposable aliases may be replaced by a larger object at link time.
    if (GA->isInterposable())
      return false;
    // Allocas: the allocated type (scaled by a constant array size) must fit.
    if (!AI->getAllocatedType()->isSized())
      return false;
    ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
    // ...
    TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
    // ...
    // Globals: require a definitive constant initializer of known size.
    if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
      return false;
    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    if (InitSize > MaxSize)
      return false;
    // ...
  } while (!Worklist.empty());
// (canReplaceGEPIdxWithZero) Decide whether the first non-zero index of GEPI
// can be replaced with zero without changing the address MemI accesses.
  // Inside the FirstNZIdx helper: scan for the first index that is not a
  // constant zero.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
    // ...

  Idx = FirstNZIdx(GEPI);
  // ...
  if (!AllocTy || !AllocTy->isSized())
    return false;
  // ...
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
  // ...
  auto IsAllNonNegative = [&]() {
// canSimplifyNullStoreOrGEP / canSimplifyNullLoadOrGEP: a store or load
// through (a GEP of) a literal null pointer, in an address space where null
// dereference is undefined, can be simplified away.
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  auto *Ptr = SI.getPointerOperand();
  if (auto *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (auto *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(),
                              GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}
// (visitLoadInst) Improve alignment, reuse an already-available loaded value,
// and canonicalise loads through select/null pointers.
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);

  // Try to find an earlier instruction that already provides this value.
  bool IsLoadCSE = false;
  // ... FindAvailableLoadedValue(); on success the available value is cast
  //     back with Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
  //                                              LI.getName() + ".cast") ...

  // None of the following transforms are legal for volatile or ordered atomic
  // loads.
  if (!LI.isUnordered()) return nullptr;

  if (Op->hasOneUse()) {
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select (Cond, load &V1, load &V2)
      // when both pointers are unconditionally safe to load from.
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P.
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P.
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
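// Illustrative sketch (not from the original source) of the select fold above,
// with made-up names:
//
//   %p = select i1 %c, ptr %a, ptr %b
//   %v = load i32, ptr %p
// =>
//   %a.val = load i32, ptr %a
//   %b.val = load i32, ptr %b
//   %v     = select i1 %c, i32 %a.val, i32 %b.val
//
// applied only when both pointers are known safe to load unconditionally.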
// likeBitCastFromVector: look for an extractelement/insertvalue sequence that
// acts like a bitcast from a vector to an aggregate of the same bit width.
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    // Every extract must read the same source vector, and each insertvalue
    // index must match the extracted lane.
    // ...
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  // ...

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // The source vector and destination aggregate must have the same store size
  // and element-wise compatible types.
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
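// Illustrative sketch (not from the original source), with made-up names: the
// helper above recognises chains such as
//
//   %e0 = extractelement <2 x float> %vec, i64 0
//   %a0 = insertvalue [2 x float] undef, float %e0, 0
//   %e1 = extractelement <2 x float> %vec, i64 1
//   %a1 = insertvalue [2 x float] %a0, float %e1, 1
//
// and reports that %a1 is effectively %vec reinterpreted as [2 x float], so a
// store of %a1 can store %vec directly.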
// combineStoreToValueType: combine stores to match the type of the value being
// stored, folding away bitcasts of the stored value.
  if (!SI.isUnordered())
    return false;
  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();
  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    assert(!BC->getType()->isX86_AMXTy() &&
           "store to x86_amx* should not happen!");
    V = BC->getOperand(0);
    // x86_amx values are deliberately left alone.
    if (V->getType()->isX86_AMXTy())
      return false;
    // ...
  }
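// Illustrative sketch (not from the original source), with made-up names:
//
//   %i = bitcast float %f to i32
//   store i32 %i, ptr %p
// =>
//   store float %f, ptr %p
//
// combineStoreToNewValue() above recreates the store at the original value
// type while preserving alignment, ordering and applicable metadata.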
// unpackStoreToAggregate: split a store of a padding-free struct or array
// value into per-element stores.
  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // A single-element struct is stored as its only element.
    unsigned Count = ST->getNumElements();
    // ...
    auto *SL = DL.getStructLayout(ST);
    // Structs with scalable size or internal padding are not unpacked.
    if (SL->getSizeInBits().isScalable())
      return false;
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    // ...
    AddrName += ".repack";

    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        // ...
      };
      // Per-element GEP, extractvalue and store.
      // ...
    }
    // ...
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      // ...
    }
    // ...
    TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();
    // ...
    auto *Addr = SI.getPointerOperand();
    // ...
    AddrName += ".repack";

    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        // ...
      };
      // Per-element GEP, extractvalue and store.
      // ...
    }
  }
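// Illustrative sketch (not from the original source), with made-up names:
//
//   store { i32, i32 } %agg, ptr %p
// =>
//   %agg.0    = extractvalue { i32, i32 } %agg, 0
//   store i32 %agg.0, ptr %p
//   %p.repack = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   %agg.1    = extractvalue { i32, i32 } %agg, 1
//   store i32 %agg.1, ptr %p.repack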
// equivalentAddressValues: test whether A and B will obviously have the same
// value (trivially equal, or identical address arithmetic in the same block).
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
// removeBitcastsFromLoadStoreOnMinMax: convert
//   store (bitcast (load (bitcast (select ...)))) to store (load (select ...))
// so the min/max pattern keeps a single type.
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  // ...
  // Make sure the type would actually change; chains of bitcasts can hit this.
  if (LI->getType() == CmpLoadTy)
    return false;
  // ...
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  // Every user of the load must be a store that neither reuses the min/max
  // addresses nor targets a swifterror slot.
  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;
  // ...
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    // ...
  }
// (visitStoreInst) Canonicalise the stored type, improve alignment, do very
// simple local dead-store elimination, and simplify stores to null/undef.
  Value *Val = SI.getOperand(0);
  // ...
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);
  // ...
  // Don't hack volatile/ordered stores.
  if (!SI.isUnordered()) return nullptr;

  // A store to an otherwise-unused alloca (possibly through a GEP) is dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Really simple DSE: scan a few instructions backwards for an earlier store
  // to the same location.
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives.
    if (BBI->isDebugOrPseudoInst()) {
      ScanInsts++;
      continue;
    }
    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // An unordered previous store of the same type to the same location is
      // dead.
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
          PrevSI->getValueOperand()->getType() ==
              SI.getValueOperand()->getType()) {
        ++NumDeadStore;
        // ...
      }
      break;
    }
    // X = load P; ...; store X -> P is a noop.
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }
      break;
    }
    // Don't scan past anything that reads or writes memory or may throw.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null becomes 'unreachable' in SimplifyCFG; canonicalise the
  // stored value to poison first.
  if (!isa<PoisonValue>(Val))
    // ...

  // A store through an undef pointer acts as a non-terminator unreachable
  // marker.
  if (isa<UndefValue>(Ptr)) {
    // ...
  }
  // store undef, Ptr -> noop.
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);
// (mergeStoreIntoSuccessor) Try to transform
//   if () { *P = v1; } else { *P = v2; }   or   *P = v1; if () { *P = v2; }
// into a phi node in the common successor feeding a single store.
  if (!SI.isUnordered())
    return false;
  // The successor must have exactly two predecessors: this block (StoreBB)
  // and one other block (OtherBB).
  if (*PredIter == StoreBB)
    ++PredIter;
  // ...
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // The other block must end in a branch and contain at least one more
  // instruction.
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
    if (!OtherStore ||
        OtherStore->getPointerOperand() != SI.getPointerOperand())
      return false;
    auto *SIVTy = SI.getValueOperand()->getType();
    auto *OSVTy = OtherStore->getValueOperand()->getType();
    return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
           SI.hasSameSpecialState(OtherStore);
  };

  // Unconditional branch ("if/then/else"): the matching store must sit just
  // before the branch, past any debug or pseudo instructions.
  while (BBI->isDebugOrPseudoInst()) {
    if (BBI == OtherBB->begin())
      return false;
    --BBI;
  }
  OtherStore = dyn_cast<StoreInst>(BBI);
  if (!OtherStoreIsMergeable(OtherStore))
    return false;

  // Conditional branch ("if/then" triangle): scan OtherBB backwards for the
  // matching store, giving up on anything that touches memory or may throw.
  for (;; --BBI) {
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (OtherStoreIsMergeable(OtherStore))
      break;
    if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
        BBI->mayWriteToMemory() || BBI == OtherBB->begin())
      return false;
  }

  // Nothing in StoreBB may read or clobber the stored value either.
  if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
    return false;

  // If the two stored values differ, merge them with a "storemerge" phi in the
  // successor.
  if (MergedVal != SI.getValueOperand()) {
    // ...
    PN->addIncoming(SI.getValueOperand(), SI.getParent());
    // ...
  }

  // Finally, insert the merged store in the successor block.
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
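// Illustrative sketch (not from the original source) of the control-flow
// pattern handled above, written as pseudo-C:
//
//   if (c) { *P = v1; } else { *P = v2; }   // or:  *P = v1; if (c) { *P = v2; }
//
// becomes a single store of a phi in the common successor:
//
//   storemerge = phi [v1, ThenBB], [v2, ElseBB]
//   *P = storemerge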