#define DEBUG_TYPE "expandmemcmp"

STATISTIC(NumMemCmpNotConstant,
          "Number of memcmp calls without constant size");
STATISTIC(NumMemCmpGreaterThanMax,
          "Number of memcmp calls with size greater than max size");
STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");
static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

static cl::opt<unsigned> MaxLoadsPerMemcmp(
    "max-loads-per-memcmp", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp"));

static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
    "max-loads-per-memcmp-opt-size", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
// This class provides helper functions to expand a memcmp library call into
// an inline expansion.
class MemCmpExpansion {
  struct ResultBlock {
    PHINode *PhiSrc1 = nullptr;
    PHINode *PhiSrc2 = nullptr;
    BasicBlock *BB = nullptr;

    ResultBlock() = default;
  };

  CallInst *const CI = nullptr;
  ResultBlock ResBlock;
  const uint64_t Size;
  unsigned MaxLoadSize = 0;
  uint64_t NumLoadsNonOneByte = 0;
  const uint64_t NumLoadsPerBlockForZeroCmp;
  std::vector<BasicBlock *> LoadCmpBlocks;
  BasicBlock *EndBlock = nullptr;
  PHINode *PhiRes = nullptr;
  const bool IsUsedForZeroCmp;
  const DataLayout &DL;
  DomTreeUpdater *DTU = nullptr;
  IRBuilder<> Builder;

  // Represents the decomposition of the memcmp size into loads. For example,
  // comparing 33 bytes with 16-byte loads available can be represented as
  // [{16, 0}, {16, 16}, {1, 32}].
  struct LoadEntry {
    // The size of the load for this entry, in bytes.
    unsigned LoadSize;
    // The offset of this load from the base pointer, in bytes.
    uint64_t Offset;
  };
  using LoadEntryVector = SmallVector<LoadEntry, 8>;
  LoadEntryVector LoadSequence;

  void createLoadCmpBlocks();
  void createResultBlock();
  void setupResultBlockPHINodes();
  void setupEndBlockPHINodes();
  Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
  void emitLoadCompareBlock(unsigned BlockIndex);
  void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                         unsigned &LoadIndex);
  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
  void emitMemCmpResultBlock();
  Value *getMemCmpExpansionZeroCase();
  Value *getMemCmpEqZeroOneBlock();
  Value *getMemCmpOneBlock();

  // Holds the lhs and rhs values of a load pair.
  struct LoadPair {
    Value *Lhs = nullptr;
    Value *Rhs = nullptr;
  };
  LoadPair getLoadPair(Type *LoadSizeType, bool NeedsBSwap, Type *CmpSizeType,
                       unsigned OffsetBytes);

  static LoadEntryVector
  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
                            unsigned MaxNumLoads,
                            unsigned &NumLoadsNonOneByte);
  static LoadEntryVector
  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
                                 unsigned MaxNumLoads,
                                 unsigned &NumLoadsNonOneByte);

public:
  MemCmpExpansion(CallInst *CI, uint64_t Size,
                  const TargetTransformInfo::MemCmpExpansionOptions &Options,
                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
                  DomTreeUpdater *DTU);

  unsigned getNumBlocks();
  uint64_t getNumLoads() const { return LoadSequence.size(); }

  Value *getMemCmpExpansion();
};
MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
    uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
    const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
  NumLoadsNonOneByte = 0;
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  while (Size && !LoadSizes.empty()) {
    const unsigned LoadSize = LoadSizes.front();
    const uint64_t NumLoadsForThisSize = Size / LoadSize;
    if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
      // Do not expand if the total number of loads exceeds what the target
      // allows; exit before completing the expansion to avoid building a huge
      // sequence for large sizes.
      return {};
    }
    if (NumLoadsForThisSize > 0) {
      for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
        LoadSequence.push_back({LoadSize, Offset});
        Offset += LoadSize;
      }
      if (LoadSize > 1)
        ++NumLoadsNonOneByte;
      Size = Size % LoadSize;
    }
    LoadSizes = LoadSizes.drop_front();
  }
  return LoadSequence;
}
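// Worked example (illustrative): for Size = 15 with LoadSizes = {8, 4, 2, 1}
// and a sufficiently large MaxNumLoads, the greedy decomposition is
//   {8, 0}, {4, 8}, {2, 12}, {1, 14}
// i.e. one load of each size, each starting where the previous one ended,
// and NumLoadsNonOneByte == 3 (the 8-, 4- and 2-byte sizes were each used).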
MemCmpExpansion::LoadEntryVector
MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
                                                const unsigned MaxLoadSize,
                                                const unsigned MaxNumLoads,
                                                unsigned &NumLoadsNonOneByte) {
  // These are already handled by the greedy approach.
  if (Size < 2 || MaxLoadSize < 2)
    return {};

  // We try to do as many non-overlapping loads as possible starting from the
  // beginning.
  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
  assert(NumNonOverlappingLoads && "there must be at least one load");
  // There remain 0 to (MaxLoadSize - 1) bytes to load, this will be done with
  // an overlapping load.
  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
  // Bail if we do not need an overlapping load; this case is already handled
  // by the greedy approach.
  if (Size == 0)
    return {};
  // Bail if the number of loads (non-overlapping + the overlapping one)
  // is larger than the maximum allowed.
  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
    return {};

  // Add non-overlapping loads.
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
    LoadSequence.push_back({MaxLoadSize, Offset});
    Offset += MaxLoadSize;
  }

  // Add the last overlapping load.
  LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
  NumLoadsNonOneByte = 1;
  return LoadSequence;
}
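// Worked example (illustrative): for Size = 7 and MaxLoadSize = 4, the greedy
// sequence would be {4, 0}, {2, 4}, {1, 6} (three loads), whereas the
// overlapping sequence is {4, 0}, {4, 3} (two loads, the second re-reading
// byte 3), which is why the constructor below prefers it when it is shorter.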
// Initialize the basic block structure required for expansion of a memcmp
// call with the given maximum load size and memcmp size parameter.
MemCmpExpansion::MemCmpExpansion(
    CallInst *const CI, uint64_t Size,
    const TargetTransformInfo::MemCmpExpansionOptions &Options,
    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
    DomTreeUpdater *DTU)
    : CI(CI), Size(Size), NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), DTU(DTU),
      Builder(CI) {
  assert(Size > 0 && "zero blocks");
  // Scale the max size down if the target can load more bytes than we need.
  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
    LoadSizes = LoadSizes.drop_front();
  }
  assert(!LoadSizes.empty() && "cannot load Size bytes");
  MaxLoadSize = LoadSizes.front();
  // Compute the decomposition.
  unsigned GreedyNumLoadsNonOneByte = 0;
  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes,
                                           Options.MaxNumLoads,
                                           GreedyNumLoadsNonOneByte);
  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
  // If we allow overlapping loads and the greedy sequence is not already
  // minimal, see whether an overlapping sequence does better.
  if (Options.AllowOverlappingLoads &&
      (LoadSequence.empty() || LoadSequence.size() > 2)) {
    unsigned OverlappingNumLoadsNonOneByte = 0;
    auto OverlappingLoads = computeOverlappingLoadSequence(
        Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
    if (!OverlappingLoads.empty() &&
        (LoadSequence.empty() ||
         OverlappingLoads.size() < LoadSequence.size())) {
      LoadSequence = OverlappingLoads;
      NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
    }
  }
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
}
unsigned MemCmpExpansion::getNumBlocks() {
  if (IsUsedForZeroCmp)
    return getNumLoads() / NumLoadsPerBlockForZeroCmp +
           (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
  return getNumLoads();
}
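// Example (illustrative): with 5 loads and NumLoadsPerBlockForZeroCmp == 2,
// the zero-equality expansion uses 5/2 + 1 == 3 blocks; the general expansion
// always uses one block per load.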
void MemCmpExpansion::createLoadCmpBlocks() {
  for (unsigned i = 0; i < getNumBlocks(); i++) {
    BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
                                        EndBlock->getParent(), EndBlock);
    LoadCmpBlocks.push_back(BB);
  }
}
void MemCmpExpansion::createResultBlock() {
  ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
                                   EndBlock->getParent(), EndBlock);
}
MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
                                                       bool NeedsBSwap,
                                                       Type *CmpSizeType,
                                                       unsigned OffsetBytes) {
  // Get the memory source at offset `OffsetBytes`.
  Value *LhsSource = CI->getArgOperand(0);
  Value *RhsSource = CI->getArgOperand(1);
  Align LhsAlign = LhsSource->getPointerAlignment(DL);
  Align RhsAlign = RhsSource->getPointerAlignment(DL);
  if (OffsetBytes > 0) {
    auto *ByteType = Type::getInt8Ty(CI->getContext());
    LhsSource = Builder.CreateConstGEP1_64(
        ByteType, Builder.CreateBitCast(LhsSource, ByteType->getPointerTo()),
        OffsetBytes);
    RhsSource = Builder.CreateConstGEP1_64(
        ByteType, Builder.CreateBitCast(RhsSource, ByteType->getPointerTo()),
        OffsetBytes);
    LhsAlign = commonAlignment(LhsAlign, OffsetBytes);
    RhsAlign = commonAlignment(RhsAlign, OffsetBytes);
  }

  // Create a constant or a load from the source.
  Value *Lhs = nullptr;
  if (auto *C = dyn_cast<Constant>(LhsSource))
    Lhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Lhs)
    Lhs = Builder.CreateAlignedLoad(LoadSizeType, LhsSource, LhsAlign);

  Value *Rhs = nullptr;
  if (auto *C = dyn_cast<Constant>(RhsSource))
    Rhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Rhs)
    Rhs = Builder.CreateAlignedLoad(LoadSizeType, RhsSource, RhsAlign);

  // Swap bytes if required.
  if (NeedsBSwap) {
    Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
                                                Intrinsic::bswap, LoadSizeType);
    Lhs = Builder.CreateCall(Bswap, Lhs);
    Rhs = Builder.CreateCall(Bswap, Rhs);
  }

  // If the comparison type is wider than the load type, zero-extend the loaded
  // values so they can feed a composite xor+or comparison.
  if (CmpSizeType != nullptr && CmpSizeType != LoadSizeType) {
    Lhs = Builder.CreateZExt(Lhs, CmpSizeType);
    Rhs = Builder.CreateZExt(Rhs, CmpSizeType);
  }
  return {Lhs, Rhs};
}
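// Note on the bswap above (illustrative): memcmp compares bytes
// lexicographically, which matches an unsigned integer comparison of the
// loaded words only when the first memory byte is the most significant one.
// On little-endian targets the loads are therefore byte-swapped first, e.g.
// the bytes {0x01, 0x02} loaded as the i16 0x0201 become 0x0102 before
// comparing.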
void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
                                               unsigned OffsetBytes) {
  BasicBlock *BB = LoadCmpBlocks[BlockIndex];
  Builder.SetInsertPoint(BB);
  const LoadPair Loads =
      getLoadPair(Type::getInt8Ty(CI->getContext()), /*NeedsBSwap=*/false,
                  Type::getInt32Ty(CI->getContext()), OffsetBytes);
  Value *Diff = Builder.CreateSub(Loads.Lhs, Loads.Rhs);

  PhiRes->addIncoming(Diff, BB);

  if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
    // Early exit to EndBlock if a difference was found; otherwise continue to
    // the next load/compare block.
    Value *Cmp =
        Builder.CreateICmpNE(Diff, ConstantInt::get(Diff->getType(), 0));
    Builder.Insert(
        BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp));
    if (DTU)
      DTU->applyUpdates(
          {{DominatorTree::Insert, BB, EndBlock},
           {DominatorTree::Insert, BB, LoadCmpBlocks[BlockIndex + 1]}});
  } else {
    // The last block branches unconditionally to EndBlock.
    Builder.Insert(BranchInst::Create(EndBlock));
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, BB, EndBlock}});
  }
}
Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
                                            unsigned &LoadIndex) {
  assert(LoadIndex < getNumLoads() &&
         "getCompareLoadPairs() called with no remaining loads");
  std::vector<Value *> XorList, OrList;
  Value *Diff = nullptr;

  const unsigned NumLoads =
      std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);

  // For a single-block expansion, insert before the memcmp call itself.
  if (LoadCmpBlocks.empty())
    Builder.SetInsertPoint(CI);
  else
    Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  Value *Cmp = nullptr;
  // If there are multiple loads per block, a composite comparison using
  // xor+or is generated; its type is the largest load type.
  IntegerType *const MaxLoadType =
      NumLoads == 1 ? nullptr
                    : IntegerType::get(CI->getContext(), MaxLoadSize * 8);

  for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
    const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
    const LoadPair Loads = getLoadPair(
        IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8),
        /*NeedsBSwap=*/false, MaxLoadType, CurLoadEntry.Offset);

    if (NumLoads != 1) {
      // Accumulate the per-pair differences for the composite comparison.
      Diff = Builder.CreateXor(Loads.Lhs, Loads.Rhs);
      Diff = Builder.CreateZExt(Diff, MaxLoadType);
      XorList.push_back(Diff);
    } else {
      // With one load per block, just compare the loaded values directly.
      Cmp = Builder.CreateICmpNE(Loads.Lhs, Loads.Rhs);
    }
  }

  auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
    std::vector<Value *> OutList;
    for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
      Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
      OutList.push_back(Or);
    }
    if (InList.size() % 2 != 0)
      OutList.push_back(InList.back());
    return OutList;
  };

  if (!Cmp) {
    // Pairwise OR the XOR results.
    OrList = pairWiseOr(XorList);

    // Pairwise OR the OR results until one result is left.
    while (OrList.size() != 1) {
      OrList = pairWiseOr(OrList);
    }

    assert(Diff && "Failed to find comparison diff");
    Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
  }

  return Cmp;
}
void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                                        unsigned &LoadIndex) {
  Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);

  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit to ResultBlock if a difference was found; otherwise continue
  // to the next LoadCmpBlock or to EndBlock.
  BasicBlock *BB = Builder.GetInsertBlock();
  Builder.Insert(BranchInst::Create(ResBlock.BB, NextBB, Cmp));
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
                       {DominatorTree::Insert, BB, NextBB}});

  // Add a phi edge from the last LoadCmpBlock to EndBlock with a value of 0,
  // since the early exit to ResultBlock was not taken (no difference found).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}
void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
  // There is one load per block in this case, so BlockIndex == LoadIndex.
  const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];

  if (CurLoadEntry.LoadSize == 1) {
    MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
    return;
  }

  Type *LoadSizeType =
      IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");

  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  const LoadPair Loads =
      getLoadPair(LoadSizeType, /*NeedsBSwap=*/DL.isLittleEndian(), MaxLoadType,
                  CurLoadEntry.Offset);

  // Feed the loaded values into the result block's phi nodes; they are only
  // needed when the ordering (not just equality) of the sources matters.
  if (!IsUsedForZeroCmp) {
    ResBlock.PhiSrc1->addIncoming(Loads.Lhs, LoadCmpBlocks[BlockIndex]);
    ResBlock.PhiSrc2->addIncoming(Loads.Rhs, LoadCmpBlocks[BlockIndex]);
  }

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Loads.Lhs, Loads.Rhs);
  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit to ResultBlock if a difference was found; otherwise continue
  // to the next LoadCmpBlock or to EndBlock.
  BasicBlock *BB = Builder.GetInsertBlock();
  Builder.Insert(BranchInst::Create(NextBB, ResBlock.BB, Cmp));
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, BB, NextBB},
                       {DominatorTree::Insert, BB, ResBlock.BB}});

  // Add a phi edge from the last LoadCmpBlock to EndBlock with a value of 0,
  // since the early exit to ResultBlock was not taken (no difference found).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}
void MemCmpExpansion::emitMemCmpResultBlock() {
  // Special case: when the result is only compared against zero, any
  // difference can simply be reported as 1.
  if (IsUsedForZeroCmp) {
    BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
    Builder.SetInsertPoint(ResBlock.BB, InsertPt);
    Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
    PhiRes->addIncoming(Res, ResBlock.BB);
    Builder.Insert(BranchInst::Create(EndBlock));
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
    return;
  }
  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
  Builder.SetInsertPoint(ResBlock.BB, InsertPt);

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
                                  ResBlock.PhiSrc2);
  Value *Res =
      Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
                           ConstantInt::get(Builder.getInt32Ty(), 1));

  PhiRes->addIncoming(Res, ResBlock.BB);
  Builder.Insert(BranchInst::Create(EndBlock));
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
}
void MemCmpExpansion::setupResultBlockPHINodes() {
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  Builder.SetInsertPoint(ResBlock.BB);
  // Note: this assumes one load per block.
  ResBlock.PhiSrc1 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
  ResBlock.PhiSrc2 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
}
void MemCmpExpansion::setupEndBlockPHINodes() {
  Builder.SetInsertPoint(&EndBlock->front());
  PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}
Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
  unsigned LoadIndex = 0;
  // This loop populates each of the LoadCmpBlocks with the IR sequence to
  // handle multiple loads per block.
  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlockMultipleLoads(I, LoadIndex);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}
Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
  unsigned LoadIndex = 0;
  Value *Cmp = getCompareLoadPairs(0, LoadIndex);
  assert(LoadIndex == getNumLoads() && "some entries were not consumed");
  return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
}
Value *MemCmpExpansion::getMemCmpOneBlock() {
  Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
  bool NeedsBSwap = DL.isLittleEndian() && Size != 1;

  // The i8 and i16 cases don't need compares: zero-extend the loaded values
  // to i32 and subtract them to get the negative, zero, or positive result.
  if (Size < 4) {
    const LoadPair Loads =
        getLoadPair(LoadSizeType, NeedsBSwap, Builder.getInt32Ty(),
                    /*OffsetBytes=*/0);
    return Builder.CreateSub(Loads.Lhs, Loads.Rhs);
  }

  const LoadPair Loads = getLoadPair(LoadSizeType, NeedsBSwap, LoadSizeType,
                                     /*OffsetBytes=*/0);
  // Produce the negative, zero, or positive memcmp result by subtracting two
  // zero-extended compare bits: sub(ugt, ult).
  Value *CmpUGT = Builder.CreateICmpUGT(Loads.Lhs, Loads.Rhs);
  Value *CmpULT = Builder.CreateICmpULT(Loads.Lhs, Loads.Rhs);
  Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
  Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
  return Builder.CreateSub(ZextUGT, ZextULT);
}
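// Example of the sub(ugt, ult) idiom (illustrative): for Lhs > Rhs it yields
// 1 - 0 == 1, for Lhs < Rhs it yields 0 - 1 == -1, and for equality
// 0 - 0 == 0, which is exactly the sign behavior memcmp must provide.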
Value *MemCmpExpansion::getMemCmpExpansion() {
  // Create the basic block framework for a multi-block expansion.
  if (getNumBlocks() != 1) {
    BasicBlock *StartBlock = CI->getParent();
    EndBlock = SplitBlock(StartBlock, CI, DTU, /*LI=*/nullptr,
                          /*MSSAU=*/nullptr, "endblock");
    setupEndBlockPHINodes();
    createResultBlock();

    // If the memcmp result is used for more than a zero-equality test, we
    // need to know which source was larger; the result block's phi nodes
    // collect the loaded values needed for that comparison.
    if (!IsUsedForZeroCmp) setupResultBlockPHINodes();

    // Create the required number of load/compare basic blocks.
    createLoadCmpBlocks();

    // Update the terminator added by SplitBlock to branch to the first
    // LoadCmpBlock.
    StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
    if (DTU)
      DTU->applyUpdates({{DominatorTree::Insert, StartBlock, LoadCmpBlocks[0]},
                         {DominatorTree::Delete, StartBlock, EndBlock}});
  }

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  if (IsUsedForZeroCmp)
    return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
                               : getMemCmpExpansionZeroCase();

  if (getNumBlocks() == 1)
    return getMemCmpOneBlock();

  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlock(I);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}
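// CFG sketch (illustrative) for a multi-block, non-zero-equality expansion:
// StartBlock branches to loadbb0; each loadbb falls through to the next one
// on equality (the last one to endblock) and exits early to res_block on the
// first difference; res_block computes -1 or 1 and branches to endblock,
// where phi.res merges that value with the 0 arriving from the all-equal path.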
// Returns true if the memcmp/bcmp call was expanded inline.
static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
                         const TargetLowering *TL, const DataLayout *DL,
                         ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
                         DomTreeUpdater *DTU, const bool IsBCmp) {
  // Expansion requires the size to be a compile-time constant.
  ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  if (!SizeCast) {
    NumMemCmpNotConstant++;
    return false;
  }
  const uint64_t SizeVal = SizeCast->getZExtValue();
  if (SizeVal == 0)
    return false;

  // Ask the target whether (and how) it wants memcmp expanded. bcmp, and
  // memcmp whose result is only compared against zero, need no ordering.
  const bool IsUsedForZeroCmp =
      IsBCmp || isOnlyUsedInZeroEqualityComparison(CI);
  bool OptForSize = CI->getFunction()->hasOptSize() ||
                    llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
  auto Options = TTI->enableMemCmpExpansion(OptForSize, IsUsedForZeroCmp);
  if (!Options)
    return false;

  if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
    Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;
  if (OptForSize && MaxLoadsPerMemcmpOptSize.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;
  if (!OptForSize && MaxLoadsPerMemcmp.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmp;

  MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DTU);

  // Don't expand if this would require more loads than the target allows.
  if (Expansion.getNumLoads() == 0) {
    NumMemCmpGreaterThanMax++;
    return false;
  }

  NumMemCmpInlined++;

  Value *Res = Expansion.getMemCmpExpansion();

  // Replace the memcmp call with the result of the expansion and erase it.
  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}
class ExpandMemCmpPass : public FunctionPass {
public:
  static char ID;

  ExpandMemCmpPass() : FunctionPass(ID) {
    initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC)
      return false;
    const TargetLowering *TL =
        TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();

    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary())
                    ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                    : nullptr;
    DominatorTree *DT = nullptr;
    if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
      DT = &DTWP->getDomTree();
    auto PA = runImpl(F, TLI, TTI, TL, PSI, BFI, DT);
    return !PA.areAllPreserved();
  }

private:
  static bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                         const TargetTransformInfo *TTI,
                         const TargetLowering *TL, const DataLayout &DL,
                         ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
                         DomTreeUpdater *DTU);
  static PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
                                   const TargetTransformInfo *TTI,
                                   const TargetLowering *TL,
                                   ProfileSummaryInfo *PSI,
                                   BlockFrequencyInfo *BFI, DominatorTree *DT);
};
bool ExpandMemCmpPass::runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                                  const TargetTransformInfo *TTI,
                                  const TargetLowering *TL,
                                  const DataLayout &DL, ProfileSummaryInfo *PSI,
                                  BlockFrequencyInfo *BFI,
                                  DomTreeUpdater *DTU) {
  for (Instruction &I : BB) {
    CallInst *CI = dyn_cast<CallInst>(&I);
    if (!CI)
      continue;
    LibFunc Func;
    if (TLI->getLibFunc(*CI, Func) &&
        (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
        expandMemCmp(CI, TTI, TL, &DL, PSI, BFI, DTU, Func == LibFunc_bcmp)) {
      return true;
    }
  }
  return false;
}
PreservedAnalyses ExpandMemCmpPass::runImpl(
    Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
    const TargetLowering *TL, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI,
    DominatorTree *DT) {
  std::optional<DomTreeUpdater> DTU;
  if (DT)
    DTU.emplace(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  const DataLayout &DL = F.getParent()->getDataLayout();
  bool MadeChanges = false;
  for (auto BBIt = F.begin(); BBIt != F.end();) {
    if (runOnBlock(*BBIt, TLI, TTI, TL, DL, PSI, BFI,
                   DTU ? &*DTU : nullptr)) {
      MadeChanges = true;
      // The expansion reshapes the CFG, so restart from the beginning.
      BBIt = F.begin();
    } else {
      ++BBIt;
    }
  }
  if (MadeChanges)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB, TLI);
  if (!MadeChanges)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}
char ExpandMemCmpPass::ID = 0;
INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, DEBUG_TYPE,
                      "Expand memcmp() to load/stores", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(ExpandMemCmpPass, DEBUG_TYPE,
                    "Expand memcmp() to load/stores", false, false)

FunctionPass *llvm::createExpandMemCmpPass() {
  return new ExpandMemCmpPass();
}
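// End-to-end example (illustrative, value names assumed): a 4-byte memcmp
// used only in an equality test, e.g.
//   %res = call i32 @memcmp(ptr %a, ptr %b, i64 4)
//   %eq = icmp eq i32 %res, 0
// expands, on a target offering 4-byte loads, to the single-block
// zero-equality form produced by getMemCmpEqZeroOneBlock():
//   %la = load i32, ptr %a
//   %lb = load i32, ptr %b
//   %ne = icmp ne i32 %la, %lb
//   %res = zext i1 %ne to i32
// No bswap is needed here because only equality, not ordering, is observed.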