201 "disable-separate-const-offset-from-gep",
cl::init(
false),
202 cl::desc(
"Do not separate the constant offset from a GEP instruction"),
210 cl::desc(
"Verify this pass produces no dead code"),
class ConstantOffsetExtractor {
  // ...
  ConstantOffsetExtractor(Instruction *InsertionPt, DominatorTree *DT)
      : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {}
  // ...
  Value *rebuildWithoutConstOffset();
  // ...
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);
  Value *removeConstOffset(unsigned ChainIndex);
  // ...
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);
class SeparateConstOffsetFromGEPLegacyPass : public FunctionPass {
  // ...
  SeparateConstOffsetFromGEPLegacyPass(bool LowerGEP = false)
      : FunctionPass(ID), LowerGEP(LowerGEP) {
    // ...
  }
class SeparateConstOffsetFromGEP {
  // ...
  SeparateConstOffsetFromGEP(
      DominatorTree *DT, LoopInfo *LI, TargetLibraryInfo *TLI,
      function_ref<TargetTransformInfo &(Function &)> GetTTI, bool LowerGEP)
      : DT(DT), LI(LI), TLI(TLI), GetTTI(GetTTI), LowerGEP(LowerGEP) {}
  using ExprKey = std::pair<Value *, Value *>;

  // Normalize the operand pair of a commutable expression so that (A, B) and
  // (B, A) produce the same map key.
  static ExprKey createNormalizedCommutablePair(Value *A, Value *B) {
    // ...
  }
  // ...
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);
  // ...
  bool hasMoreThanOneUseInLoop(Value *v, Loop *L);
char SeparateConstOffsetFromGEPLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEPLegacyPass, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
// ... (analysis dependencies) ...
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEPLegacyPass, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *llvm::createSeparateConstOffsetFromGEPPass(bool LowerGEP) {
  return new SeparateConstOffsetFromGEPLegacyPass(LowerGEP);
}
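// Illustrative sketch (not part of this file): how a backend's codegen setup
// might schedule the transform through the factory above. The helper name and
// the legacy PassManagerBase parameter are assumptions for the sketch; only
// createSeparateConstOffsetFromGEPPass comes from this pass.
static void addSeparateConstOffsetFromGEPSketch(legacy::PassManagerBase &PM,
                                                bool LowerGEP) {
  // LowerGEP = true additionally lowers the remaining variadic GEP.
  PM.add(createSeparateConstOffsetFromGEPPass(LowerGEP));
}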
bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended, bool ZeroExtended,
                                           BinaryOperator *BO, bool NonNegative) {
  // Only add, sub and or are traced into.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or)
    return false;

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // An or counts as an add only when its operands share no set bits.
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
    return false;

  // A constant in the RHS of a sub is not handled when only zero-extended.
  if (ZeroExtended && !SignExtended && BO->getOpcode() == Instruction::Sub)
    return false;
  // ...
  // A non-negative constant operand lets an enclosing sext be distributed.
  if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
    if (!ConstLHS->isNegative())
      return true;
  }
  if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
    if (!ConstRHS->isNegative())
      return true;
  }
  // ...
  // An enclosing sext/zext only distributes over an add/sub that cannot wrap.
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    // ...
  }
APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // Remember the chain height so it can be restored if an operand fails.
  size_t ChainLength = UserChain.size();
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /*NonNegative=*/false);
  if (ConstantOffset != 0)
    return ConstantOffset;

  // The LHS held no constant offset; roll the chain back and try the RHS.
  UserChain.resize(ChainLength);
  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /*NonNegative=*/false);
  // A constant found in the RHS of a sub contributes with a flipped sign.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;

  if (ConstantOffset == 0)
    UserChain.resize(ChainLength);
  return ConstantOffset;
}
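// Plain-arithmetic sanity check (illustration only, not part of the pass) of
// why a constant found in the right operand of a sub is negated above:
static_assert(40 - (7 + 5) == (40 - 7) - 5,
              "a - (b + C) == (a - b) - C, so C is folded with a flipped sign");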
APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // Only integer-typed values are traced.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
  // Values that are not a User (e.g. an Argument) cannot be traced further.
  User *U = dyn_cast<User>(V);
  // ...
  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(U)) {
    // The constant itself is the offset.
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
  } else if (isa<TruncInst>(V)) {
    // ...
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /*SignExtended=*/true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // sext(zext(a)) == zext(a), so SignExtended is cleared; zext(a) >= 0 does
    // not imply a >= 0, so NonNegative is cleared as well.
    ConstantOffset =
        find(U->getOperand(0), /*SignExtended=*/false,
             /*ZeroExtended=*/true, /*NonNegative=*/false).zext(BitWidth);
  }

  // If a non-zero constant offset was found, record V in UserChain so the
  // constant can later be removed from the expression.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}
Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // Re-apply the stripped casts recorded in ExtInsts.
  for (CastInst *I : llvm::reverse(ExtInsts)) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // A constant is extended by folding the cast.
      // ...
    } else {
      // Otherwise clone the original cast and apply it to the rebuilt value.
      Instruction *Ext = I->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}
Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (the entries that used to be s/zext) from UserChain.
  unsigned NewSize = 0;
  for (User *I : UserChain) {
    if (I != nullptr) {
      UserChain[NewSize] = I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}
Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // Applying the collected exts to a ConstantInt yields a ConstantInt again.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert(
        (isa<SExtInst>(Cast) || isa<ZExtInst>(Cast) || isa<TruncInst>(Cast)) &&
        "Only following instructions can be traced: sext, zext & trunc");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr; // Will be erased by rebuildWithoutConstOffset.
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Otherwise U is a BinaryOperator; OpNo is the operand leading to the constant.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);
  // Clone BO with the rebuilt operand in place of the traced one.
  // ...
  return UserChain[ChainIndex] = NewBO;
}
Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  assert((BO->use_empty() || BO->hasOneUse()) &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");

  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If the rebuilt operand is 0 (and BO is not "0 - x"), BO folds away.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }
  // ...
  if (BO->getOpcode() == Instruction::Or) {
    // With the constant removed, the operands may no longer be disjoint, so
    // the "or" must be rebuilt as an "add".
    NewOp = Instruction::Add;
  }
  // ...
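// Plain-arithmetic illustration (not part of the pass) of why an "or" whose
// operands share no set bits can be rebuilt as an "add" once the constant has
// been split out:
static_assert((0x40 | 0x3) == 0x40 + 0x3,
              "x | C == x + C when x and C have no common bits set");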
Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
                                        User *&UserChainTail,
                                        DominatorTree *DT) {
  ConstantOffsetExtractor Extractor(GEP, DT);
  // Find a non-zero constant offset in Idx first.
  APInt ConstantOffset =
      Extractor.find(Idx, /*SignExtended=*/false, /*ZeroExtended=*/false,
                     GEP->isInBounds());
  if (ConstantOffset == 0) {
    UserChainTail = nullptr;
    return nullptr;
  }
  // Rebuild the index with the constant offset removed.
  Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
  UserChainTail = Extractor.UserChain.back();
  return IdxWithoutConstOffset;
}
int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
                                      DominatorTree *DT) {
  // Compute the constant offset without rewriting anything.
  return ConstantOffsetExtractor(GEP, DT)
      .find(Idx, /*SignExtended=*/false, /*ZeroExtended=*/false,
            GEP->isInBounds())
      .getSExtValue();
}
bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToIndexSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *PtrIdxTy = DL->getIndexType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
       ++I, ++GTI) {
    // Skip struct member indices, which must stay i32.
    if (GTI.isSequential()) {
      // Promote any array index that does not match the pointer index type.
      if ((*I)->getType() != PtrIdxTy) {
        *I = CastInst::CreateIntegerCast(*I, PtrIdxTy, /*isSigned=*/true,
                                         "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}
int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // Constant offsets of scalable types are not really constant.
      if (GTI.getIndexedType()->isScalableTy())
        continue;
      // Try to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices; accumulate every extracted constant
        // into a single byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      // When lowering the GEP, constant struct-field offsets are folded in too.
      StructType *StTy = GTI.getStructType();
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0: its offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}
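// Illustration only (not part of the pass): the same accumulation done by
// accumulateByteOffset, spelled out for a concrete GEP shape
//   getelementptr {i64, [8 x float]}, ptr %p, i64 (%i + 1), i32 1, i64 (%j + 3)
// assuming a typical LP64 layout (i64 size 8, float size 4, struct size 40,
// field 1 at byte offset 8) -- the sizes are assumptions for the example.
constexpr long long illustrativeAccumulatedByteOffset() {
  long long Acc = 0;
  Acc += 1 * 40; // sequential index: constant part 1, element size 40 bytes
  Acc += 8;      // struct field 1: constant field offset (LowerGEP case only)
  Acc += 3 * 4;  // sequential index: constant part 3, element size 4 bytes
  return Acc;    // = 60 bytes folded into the final address
}
static_assert(illustrativeAccumulatedByteOffset() == 60, "1*40 + 8 + 3*4");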
void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  // ...
  Value *ResultPtr = Variadic->getOperand(0);
  Loop *L = LI->getLoopFor(Variadic->getParent());
  // A loop-invariant base with a single in-loop use may later be swapped with
  // the constant-offset GEP so LICM can hoist the latter.
  bool isSwapCandidate =
      L && L->isLoopInvariant(ResultPtr) &&
      !hasMoreThanOneUseInLoop(ResultPtr, L);
  Value *FirstResult = nullptr;

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Emit one single-index (byte) GEP per sequential index; struct indices are
  // already folded into AccumulativeByteOffset.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // ...
      // Scale the index by the element size (a shift when it is a power of two).
      if (ElementSize != 1) {
        // ...
      }
      // ...
      if (FirstResult == nullptr)
        FirstResult = ResultPtr;
    }
  }

  // Append one more GEP for the accumulated constant byte offset.
  if (AccumulativeByteOffset != 0) {
    // ...
  } else
    isSwapCandidate = false;

  auto *FirstGEP = dyn_cast_or_null<GetElementPtrInst>(FirstResult);
  auto *SecondGEP = dyn_cast<GetElementPtrInst>(ResultPtr);
  if (isSwapCandidate && isLegalToSwapOperand(FirstGEP, SecondGEP, L))
    swapGEPOperand(FirstGEP, SecondGEP);

  Variadic->replaceAllUsesWith(ResultPtr);
  // ...
}
void SeparateConstOffsetFromGEP::lowerToArithmetics(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());
  assert(IntPtrTy == DL->getIndexType(Variadic->getType()) &&
         "Pointer type must match index type for arithmetic-based lowering of "
         "split GEPs");

  // The address is rebuilt as plain integer arithmetic on the base pointer.
  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // ...
      // Scale the index by the element size, then add it to the address.
      if (ElementSize != 1) {
        // ...
      }
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Finally add the accumulated constant byte offset.
  if (AccumulativeByteOffset != 0) {
    // ...
  }
  // ...
  Variadic->replaceAllUsesWith(ResultPtr);
  // ...
}
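// Illustration only (scalar model, not LLVM API): the net effect of splitting
// a GEP index and folding its constant part into a trailing byte offset, which
// is what the two lowering strategies above materialize in different forms.
// Names and the constant 5 are chosen for the example.
constexpr long long addrOriginal(long long Base, long long I, long long ElemSize) {
  return Base + (I + 5) * ElemSize;             // index with embedded constant 5
}
constexpr long long addrSplit(long long Base, long long I, long long ElemSize) {
  long long VariablePart = Base + I * ElemSize; // variadic part, CSE-able
  return VariablePart + 5 * ElemSize;           // AccumulativeByteOffset
}
static_assert(addrOriginal(1000, 7, 4) == addrSplit(1000, 7, 4),
              "splitting the constant offset does not change the address");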
bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend already handles GEPs whose indices are all constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToIndexSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);
  if (!NeedsExtraction)
    return Changed;

  TargetTransformInfo &TTI = GetTTI(*GEP->getFunction());

  // Without LowerGEP, only split when the target supports the resulting
  // "base + AccumulativeByteOffset" addressing mode.
  if (!LowerGEP) {
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0, AddrSpace))
      return Changed;
  }

  // Remove the constant offset from every sequential index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // ...
      User *UserChainTail;
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
      if (NewIdx != nullptr) {
        // Switch to the index with the constant offset removed.
        GEP->setOperand(I, NewIdx);
        // ...
      }
    }
  }

  // Clear inbounds: after the split, intermediate results may land outside the
  // bounds of the original object.
  bool GEPWasInBounds = GEP->isInBounds();
  GEP->setIsInBounds(false);

  // Lower the remaining variadic part when requested.
  if (LowerGEP) {
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    bool PointerHasExtraData = DL->getPointerSizeInBits(AddrSpace) !=
                               DL->getIndexSizeInBits(AddrSpace);
    if (TTI.useAA() || DL->isNonIntegralAddressSpace(AddrSpace) ||
        PointerHasExtraData)
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // Nothing left to materialize if the accumulated constant offset is zero.
  if (AccumulativeByteOffset == 0)
    return true;

  // Emit the constant offset after a clone of the now constant-free GEP,
  // preferably as a GEP of the original element type when the byte offset is a
  // multiple of that type's size, and as a byte-wise "uglygep" otherwise.
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);
  int64_t ElementTypeSizeOfGEP =
      static_cast<int64_t>(DL->getTypeAllocSize(GEP->getResultElementType()));
  Type *PtrIdxTy = DL->getIndexType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    // ...
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
  } else {
    IRBuilder<> Builder(GEP);
    NewGEP = cast<Instruction>(Builder.CreateGEP(
        Builder.getInt8Ty(), NewGEP,
        {ConstantInt::get(PtrIdxTy, AccumulativeByteOffset, /*IsSigned=*/true)},
        "uglygep", GEPWasInBounds));
    // ...
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();
  return true;
}
bool SeparateConstOffsetFromGEPLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
    return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  };
  SeparateConstOffsetFromGEP Impl(DT, LI, TLI, GetTTI, LowerGEP);
  return Impl.run(F);
}
bool SeparateConstOffsetFromGEP::run(Function &F) {
  if (DisableSeparateConstOffsetFromGEP)
    return false;

  DL = &F.getParent()->getDataLayout();
  bool Changed = false;
  for (BasicBlock &B : F) {
    if (!DT->isReachableFromEntry(&B))
      continue;
    // Split every GEP; iterate with early increment since splitGEP may erase
    // the current instruction.
    for (Instruction &I : llvm::make_early_inc_range(B))
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I))
        Changed |= splitGEP(GEP);
  }

  Changed |= reuniteExts(F);

  if (VerifyNoDeadCode)
    verifyNoDeadCode(F);
  return Changed;
}
Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
    ExprKey Key, Instruction *Dominatee,
    DenseMap<ExprKey, SmallVector<Instruction *, 2>> &DominatingExprs) {
  auto Pos = DominatingExprs.find(Key);
  if (Pos == DominatingExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // A candidate that does not dominate Dominatee can never dominate a later
  // instruction either (blocks are visited in dominator pre-order), so pop it.
  while (!Candidates.empty()) {
    Instruction *Candidate = Candidates.back();
    if (DT->dominates(Candidate, Dominatee))
      return Candidate;
    Candidates.pop_back();
  }
  return nullptr;
}
bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
  if (!I->getType()->isIntOrIntVectorTy())
    return false;

  // If I is sext(LHS) + sext(RHS) and a dominating LHS + RHS that cannot sign
  // overflow exists, rewrite I as a sext of that dominating expression.
  Value *LHS = nullptr, *RHS = nullptr;
  if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      ExprKey Key = createNormalizedCommutablePair(LHS, RHS);
      if (auto *Dom = findClosestMatchingDominator(Key, I, DominatingAdds)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  } else if (match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      if (auto *Dom =
              findClosestMatchingDominator({LHS, RHS}, I, DominatingSubs)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  }

  // Record I itself as a dominating candidate if it is an add/sub that cannot
  // sign overflow.
  if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfPoison(I)) {
      ExprKey Key = createNormalizedCommutablePair(LHS, RHS);
      DominatingAdds[Key].push_back(I);
    }
  } else if (match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfPoison(I))
      DominatingSubs[{LHS, RHS}].push_back(I);
  }
  return false;
}
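// Plain-arithmetic illustration (not part of the pass) of the identity that
// reuniteExts exploits: when the narrow add cannot sign-overflow (nsw), sign
// extension before or after the add gives the same result. With wrapping (for
// example INT_MAX + 1) the identity breaks, which is why nsw is required.
static_assert(static_cast<long long>(100 + 27) ==
                  static_cast<long long>(100) + static_cast<long long>(27),
              "sext(a + b) == sext(a) + sext(b) when the add does not wrap");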
bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
  bool Changed = false;
  DominatingAdds.clear();
  DominatingSubs.clear();
  // Visit blocks in dominator-tree pre-order so dominating expressions are
  // recorded before their potential users.
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (Instruction &I : *BB)
      Changed |= reuniteExts(&I);
  }
  return Changed;
}
void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
  for (BasicBlock &B : F) {
    for (Instruction &I : B) {
      if (isInstructionTriviallyDead(&I)) {
        std::string ErrMessage;
        raw_string_ostream RSO(ErrMessage);
        RSO << "Dead instruction detected!\n" << I << "\n";
        llvm_unreachable(RSO.str().c_str());
      }
    }
  }
}
bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
    GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
  if (!FirstGEP || !FirstGEP->hasOneUse())
    return false;
  // ...
  if (FirstGEP == SecondGEP)
    return false;
  // Both GEPs must have exactly one index.
  // ...
  if (FirstNum != SecondNum || FirstNum != 2)
    return false;
  // ...
  Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);
  // ...
  // Look through a constant shift produced by a constant multiply or divide.
  if (FirstOffsetDef && FirstOffsetDef->isShift() &&
      isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
    FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));
  // ...
  // Swapping is not profitable if the first offset is already an add/sub with
  // a constant, since that GEP could be folded away anyway.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
    unsigned opc = BO->getOpcode();
    if ((opc == Instruction::Add || opc == Instruction::Sub) &&
        (isa<ConstantInt>(BO->getOperand(0)) ||
         isa<ConstantInt>(BO->getOperand(1))))
      return false;
  }
  return true;
}
bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
  int UsesInLoop = 0;
  for (User *U : V->users()) {
    if (Instruction *User = dyn_cast<Instruction>(U))
      if (L->contains(User))
        if (++UsesInLoop > 1)
          return true;
  }
  return false;
}
void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
                                                GetElementPtrInst *Second) {
  // Swap the single indices of the two GEPs.
  Value *Offset2 = Second->getOperand(1);
  First->setOperand(1, Offset2);
  // ...
  // After the swap, base + constant may no longer be inbounds; keep the flag
  // only if the constant offset stays within the underlying object.
  const DataLayout &DAL = First->getModule()->getDataLayout();
  APInt Offset(DAL.getIndexSizeInBits(
                   cast<PointerType>(First->getType())->getAddressSpace()),
               0);
  Value *NewBase =
      First->stripAndAccumulateInBoundsConstantOffsets(DAL, Offset);
  uint64_t ObjectSize;
  if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
      Offset.ugt(ObjectSize)) {
    First->setIsInBounds(false);
    Second->setIsInBounds(false);
  } else
    First->setIsInBounds(true);
}
PreservedAnalyses
SeparateConstOffsetFromGEPPass::run(Function &F, FunctionAnalysisManager &AM) {
  // ...
  SeparateConstOffsetFromGEP Impl(DT, LI, TLI, GetTTI, LowerGEP);
  // ...
}
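// Illustrative sketch (not part of this file): scheduling the new-pass-manager
// wrapper in a FunctionPassManager. The surrounding pipeline setup is an
// assumption for the sketch; SeparateConstOffsetFromGEPPass is the pass
// defined here.
static void buildExamplePipeline(FunctionPassManager &FPM) {
  // LowerGEP defaults to false; pass true to also lower the variadic GEP.
  FPM.addPass(SeparateConstOffsetFromGEPPass(/*LowerGEP=*/true));
}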