CodeGenPrepare.cpp File Reference
#include "llvm/CodeGen/CodeGenPrepare.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>


Macros

#define DEBUG_TYPE   "codegenprepare"
 

Functions

 STATISTIC (NumBlocksElim, "Number of blocks eliminated")
 
 STATISTIC (NumPHIsElim, "Number of trivial PHIs eliminated")
 
 STATISTIC (NumGEPsElim, "Number of GEPs converted to casts")
 
 STATISTIC (NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " "sunken Cmps")
 
 STATISTIC (NumCastUses, "Number of uses of Cast expressions replaced with uses " "of sunken Casts")
 
 STATISTIC (NumMemoryInsts, "Number of memory instructions whose address " "computations were sunk")
 
 STATISTIC (NumMemoryInstsPhiCreated, "Number of phis created when address " "computations were sunk to memory instructions")
 
 STATISTIC (NumMemoryInstsSelectCreated, "Number of select created when address " "computations were sunk to memory instructions")
 
 STATISTIC (NumExtsMoved, "Number of [s|z]ext instructions combined with loads")
 
 STATISTIC (NumExtUses, "Number of uses of [s|z]ext instructions optimized")
 
 STATISTIC (NumAndsAdded, "Number of and mask instructions added to form ext loads")
 
 STATISTIC (NumAndUses, "Number of uses of and mask instructions optimized")
 
 STATISTIC (NumRetsDup, "Number of return instructions duplicated")
 
 STATISTIC (NumDbgValueMoved, "Number of debug value instructions moved")
 
 STATISTIC (NumSelectsExpanded, "Number of selects turned into branches")
 
 STATISTIC (NumStoreExtractExposed, "Number of store(extractelement) exposed")
 
 INITIALIZE_PASS_BEGIN (CodeGenPrepareLegacyPass, DEBUG_TYPE, "Optimize for code generation", false, false) INITIALIZE_PASS_END(CodeGenPrepareLegacyPass
 
static void replaceAllUsesWith (Value *Old, Value *New, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
 Replace all old uses with new ones, and push the updated BBs into FreshBBs.
 
static void computeBaseDerivedRelocateMap (const SmallVectorImpl< GCRelocateInst * > &AllRelocateCalls, DenseMap< GCRelocateInst *, SmallVector< GCRelocateInst *, 2 > > &RelocateInstMap)
 
static bool getGEPSmallConstantIntOffsetV (GetElementPtrInst *GEP, SmallVectorImpl< Value * > &OffsetV)
 
static bool simplifyRelocatesOffABase (GCRelocateInst *RelocatedBase, const SmallVectorImpl< GCRelocateInst * > &Targets)
 
static bool SinkCast (CastInst *CI)
 Sink the specified cast instruction into its user blocks.
 
static bool OptimizeNoopCopyExpression (CastInst *CI, const TargetLowering &TLI, const DataLayout &DL)
 If the specified cast instruction is a noop copy (e.g.
 
bool matchIncrement (const Instruction *IVInc, Instruction *&LHS, Constant *&Step)
 
static std::optional< std::pair< Instruction *, Constant * > > getIVIncrement (const PHINode *PN, const LoopInfo *LI)
 If given PN is an inductive variable with value IVInc coming from the backedge, and on each iteration it gets increased by Step, return pair <IVInc, Step>.
 
static bool isIVIncrement (const Value *V, const LoopInfo *LI)
 
static bool matchUAddWithOverflowConstantEdgeCases (CmpInst *Cmp, BinaryOperator *&Add)
 Match special-case patterns that check for unsigned add overflow.
 
static bool sinkCmpExpression (CmpInst *Cmp, const TargetLowering &TLI)
 Sink the given CmpInst into user blocks to reduce the number of virtual registers that must be created and coalesced.
 
static bool foldICmpWithDominatingICmp (CmpInst *Cmp, const TargetLowering &TLI)
 For pattern like:
 
static bool swapICmpOperandsToExposeCSEOpportunities (CmpInst *Cmp)
 Many architectures use the same instruction for both subtract and cmp.
 
static bool foldFCmpToFPClassTest (CmpInst *Cmp, const TargetLowering &TLI, const DataLayout &DL)
 
static bool sinkAndCmp0Expression (Instruction *AndI, const TargetLowering &TLI, SetOfInstrs &InsertedInsts)
 Duplicate and sink the given 'and' instruction into user blocks where it is used in a compare to allow isel to generate better code for targets where this operation can be combined.
 
static bool isExtractBitsCandidateUse (Instruction *User)
 Check if the candidates could be combined with a shift instruction, which includes:
 
static bool SinkShiftAndTruncate (BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, DenseMap< BasicBlock *, BinaryOperator * > &InsertedShifts, const TargetLowering &TLI, const DataLayout &DL)
 Sink both shift and truncate instruction to the use of truncate's BB.
 
static bool OptimizeExtractBits (BinaryOperator *ShiftI, ConstantInt *CI, const TargetLowering &TLI, const DataLayout &DL)
 Sink the shift right instruction into user blocks if the uses could potentially be combined with this shift instruction and generate BitExtract instruction.
 
static bool despeculateCountZeros (IntrinsicInst *CountZeros, LoopInfo &LI, const TargetLowering *TLI, const DataLayout *DL, ModifyDT &ModifiedDT, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHugeFunc)
 If counting leading or trailing zeros is an expensive operation and a zero input is defined, add a check for zero to avoid calling the intrinsic.
 
static bool isIntrinsicOrLFToBeTailCalled (const TargetLibraryInfo *TLInfo, const CallInst *CI)
 
static bool MightBeFoldableInst (Instruction *I)
 This is a little filter, which returns true if an addressing computation involving I might be folded into a load/store accessing it.
 
static bool isPromotedInstructionLegal (const TargetLowering &TLI, const DataLayout &DL, Value *Val)
 Check whether or not Val is a legal instruction for TLI.
 
static bool IsOperandAMemoryOperand (CallInst *CI, InlineAsm *IA, Value *OpVal, const TargetLowering &TLI, const TargetRegisterInfo &TRI)
 Check to see if all uses of OpVal by the specified inline asm call are due to memory operands.
 
static bool FindAllMemoryUses (Instruction *I, SmallVectorImpl< std::pair< Use *, Type * > > &MemoryUses, SmallPtrSetImpl< Instruction * > &ConsideredInsts, const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, unsigned &SeenInsts)
 Recursively walk all the uses of I until we find a memory use.
 
static bool FindAllMemoryUses (Instruction *I, SmallVectorImpl< std::pair< Use *, Type * > > &MemoryUses, const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
 
static bool IsNonLocalValue (Value *V, BasicBlock *BB)
 Return true if the specified values are defined in a different basic block than BB.
 
static bool hasSameExtUse (Value *Val, const TargetLowering &TLI)
 Check if all the uses of Val are equivalent (or free) zero or sign extensions.
 
static bool sinkSelectOperand (const TargetTransformInfo *TTI, Value *V)
 Check if V (an operand of a select instruction) is an expensive instruction that is only used once.
 
static bool isFormingBranchFromSelectProfitable (const TargetTransformInfo *TTI, const TargetLowering *TLI, SelectInst *SI)
 Returns true if a SelectInst should be turned into an explicit branch.
 
static Value * getTrueOrFalseValue (SelectInst *SI, bool isTrue, const SmallPtrSet< const Instruction *, 2 > &Selects)
 If isTrue is true, return the true value of SI, otherwise return the false value of SI.
 
static bool splitMergedValStore (StoreInst &SI, const DataLayout &DL, const TargetLowering &TLI)
 For the instruction sequence of store below, F and I values are bundled together as an i64 value before being stored into memory.
 
static bool GEPSequentialConstIndexed (GetElementPtrInst *GEP)
 
static bool tryUnmergingGEPsAcrossIndirectBr (GetElementPtrInst *GEPI, const TargetTransformInfo *TTI)
 
static bool optimizeBranch (BranchInst *Branch, const TargetLowering &TLI, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHugeFunc)
 
static void DbgInserterHelper (DbgValueInst *DVI, Instruction *VI)
 
static void DbgInserterHelper (DbgVariableRecord *DVR, Instruction *VI)
 
static void scaleWeights (uint64_t &NewTrue, uint64_t &NewFalse)
 Scale down both weights to fit into uint32_t.
 

Variables

static cl::opt< bool > DisableBranchOpts ("disable-cgp-branch-opts", cl::Hidden, cl::init(false), cl::desc("Disable branch optimizations in CodeGenPrepare"))
 
static cl::opt< bool > DisableGCOpts ("disable-cgp-gc-opts", cl::Hidden, cl::init(false), cl::desc("Disable GC optimizations in CodeGenPrepare"))
 
static cl::opt< bool > DisableSelectToBranch ("disable-cgp-select2branch", cl::Hidden, cl::init(false), cl::desc("Disable select to branch conversion."))
 
static cl::opt< bool > AddrSinkUsingGEPs ("addr-sink-using-gep", cl::Hidden, cl::init(true), cl::desc("Address sinking in CGP using GEPs."))
 
static cl::opt< bool > EnableAndCmpSinking ("enable-andcmp-sinking", cl::Hidden, cl::init(true), cl::desc("Enable sinkinig and/cmp into branches."))
 
static cl::opt< bool > DisableStoreExtract ("disable-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Disable store(extract) optimizations in CodeGenPrepare"))
 
static cl::opt< bool > StressStoreExtract ("stress-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"))
 
static cl::opt< bool > DisableExtLdPromotion ("disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " "CodeGenPrepare"))
 
static cl::opt< bool > StressExtLdPromotion ("stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " "optimization in CodeGenPrepare"))
 
static cl::opt< bool > DisablePreheaderProtect ("disable-preheader-prot", cl::Hidden, cl::init(false), cl::desc("Disable protection against removing loop preheaders"))
 
static cl::opt< bool > ProfileGuidedSectionPrefix ("profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use profile info to add section prefix for hot/cold functions"))
 
static cl::opt< bool > ProfileUnknownInSpecialSection ("profile-unknown-in-special-section", cl::Hidden, cl::desc("In profiling mode like sampleFDO, if a function doesn't have " "profile, we cannot tell the function is cold for sure because " "it may be a function newly added without ever being sampled. " "With the flag enabled, compiler can put such profile unknown " "functions into a special section, so runtime system can choose " "to handle it in a different way than .text section, to save " "RAM for example. "))
 
static cl::opt< bool > BBSectionsGuidedSectionPrefix ("bbsections-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use the basic-block-sections profile to determine the text " "section prefix for hot functions. Functions with " "basic-block-sections profile will be placed in `.text.hot` " "regardless of their FDO profile info. Other functions won't be " "impacted, i.e., their prefixes will be decided by FDO/sampleFDO " "profiles."))
 
static cl::opt< uint64_t > FreqRatioToSkipMerge ("cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), cl::desc("Skip merging empty blocks if (frequency of empty block) / " "(frequency of destination block) is greater than this ratio"))
 
static cl::opt< bool > ForceSplitStore ("force-split-store", cl::Hidden, cl::init(false), cl::desc("Force store splitting no matter what the target query says."))
 
static cl::opt< bool > EnableTypePromotionMerge ("cgp-type-promotion-merge", cl::Hidden, cl::desc("Enable merging of redundant sexts when one is dominating" " the other."), cl::init(true))
 
static cl::opt< bool > DisableComplexAddrModes ("disable-complex-addr-modes", cl::Hidden, cl::init(false), cl::desc("Disables combining addressing modes with different parts " "in optimizeMemoryInst."))
 
static cl::opt< bool > AddrSinkNewPhis ("addr-sink-new-phis", cl::Hidden, cl::init(false), cl::desc("Allow creation of Phis in Address sinking."))
 
static cl::opt< bool > AddrSinkNewSelects ("addr-sink-new-select", cl::Hidden, cl::init(true), cl::desc("Allow creation of selects in Address sinking."))
 
static cl::opt< bool > AddrSinkCombineBaseReg ("addr-sink-combine-base-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseReg field in Address sinking."))
 
static cl::opt< bool > AddrSinkCombineBaseGV ("addr-sink-combine-base-gv", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseGV field in Address sinking."))
 
static cl::opt< bool > AddrSinkCombineBaseOffs ("addr-sink-combine-base-offs", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseOffs field in Address sinking."))
 
static cl::opt< bool > AddrSinkCombineScaledReg ("addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of ScaledReg field in Address sinking."))
 
static cl::opt< bool > EnableGEPOffsetSplit ("cgp-split-large-offset-gep", cl::Hidden, cl::init(true), cl::desc("Enable splitting large offset of GEP."))
 
static cl::opt< bool > EnableICMP_EQToICMP_ST ("cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false), cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."))
 
static cl::opt< bool > VerifyBFIUpdates ("cgp-verify-bfi-updates", cl::Hidden, cl::init(false), cl::desc("Enable BFI update verification for " "CodeGenPrepare."))
 
static cl::opt< bool > OptimizePhiTypes ("cgp-optimize-phi-types", cl::Hidden, cl::init(true), cl::desc("Enable converting phi types in CodeGenPrepare"))
 
static cl::opt< unsigned > HugeFuncThresholdInCGPP ("cgpp-huge-func", cl::init(10000), cl::Hidden, cl::desc("Least BB number of huge function."))
 
static cl::opt< unsigned > MaxAddressUsersToScan ("cgp-max-address-users-to-scan", cl::init(100), cl::Hidden, cl::desc("Max number of address users to look at"))
 
static cl::opt< bool > DisableDeletePHIs ("disable-cgp-delete-phis", cl::Hidden, cl::init(false), cl::desc("Disable elimination of dead PHI nodes."))
 
 DEBUG_TYPE
 
Optimize for code generation
 
Optimize for code false
 

Macro Definition Documentation

◆ DEBUG_TYPE

#define DEBUG_TYPE   "codegenprepare"

Definition at line 109 of file CodeGenPrepare.cpp.

Function Documentation

◆ computeBaseDerivedRelocateMap()

static void computeBaseDerivedRelocateMap (const SmallVectorImpl< GCRelocateInst * > &AllRelocateCalls, DenseMap< GCRelocateInst *, SmallVector< GCRelocateInst *, 2 > > &RelocateInstMap)

◆ DbgInserterHelper() [1/2]

static void DbgInserterHelper (DbgValueInst *DVI, Instruction *VI)

◆ DbgInserterHelper() [2/2]

static void DbgInserterHelper (DbgVariableRecord *DVR, Instruction *VI)

◆ despeculateCountZeros()

static bool despeculateCountZeros (IntrinsicInst *CountZeros, LoopInfo &LI, const TargetLowering *TLI, const DataLayout *DL, ModifyDT &ModifiedDT, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHugeFunc)

If counting leading or trailing zeros is an expensive operation and a zero input is defined, add a check for zero to avoid calling the intrinsic.

We want to transform:

    %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)

into:

    entry:
      %cmpz = icmp eq i64 %A, 0
      br i1 %cmpz, label %cond.end, label %cond.false
    cond.false:
      %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
      br label %cond.end
    cond.end:
      %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]

If the transform is performed, return true and set ModifiedDT to true.
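At the source level, the guarded form produced by this transform corresponds roughly to the hypothetical helper below; the constant 64 and __builtin_ctzll are stand-ins for the intrinsic's bit width and the underlying cttz operation, not names taken from the pass.

  #include <cstdint>

  // Minimal sketch, assuming a 64-bit cttz whose zero input would otherwise be
  // expensive or undefined to speculate: test for zero first, and only invoke
  // count-trailing-zeros on a non-zero value.
  uint64_t cttz_guarded(uint64_t A) {
    return A == 0 ? 64 : static_cast<uint64_t>(__builtin_ctzll(A));
  }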

Definition at line 2293 of file CodeGenPrepare.cpp.

References llvm::PHINode::addIncoming(), llvm::BasicBlock::begin(), llvm::BitWidth, llvm::IRBuilderBase::CreateCondBr(), llvm::IRBuilderBase::CreateFreeze(), llvm::IRBuilderBase::CreateICmpEQ(), llvm::IRBuilderBase::CreatePHI(), DL, llvm::Instruction::eraseFromParent(), llvm::Value::getContext(), llvm::Instruction::getDebugLoc(), llvm::IRBuilderBase::getInt(), llvm::IntrinsicInst::getIntrinsicID(), llvm::LoopInfoBase< BlockT, LoopT >::getLoopFor(), llvm::Constant::getNullValue(), llvm::User::getOperand(), llvm::User::getOperandUse(), llvm::Instruction::getParent(), llvm::Type::getScalarSizeInBits(), llvm::BasicBlock::getTerminator(), llvm::IRBuilderBase::getTrue(), llvm::Value::getType(), llvm::SmallSet< T, N, C >::insert(), llvm::TargetLoweringBase::isCheapToSpeculateCtlz(), llvm::TargetLoweringBase::isCheapToSpeculateCttz(), llvm::isGuaranteedNotToBeUndefOrPoison(), llvm::isKnownNonZero(), llvm::Type::isVectorTy(), llvm::PatternMatch::m_One(), llvm::PatternMatch::match(), replaceAllUsesWith(), llvm::CallBase::setArgOperand(), llvm::IRBuilderBase::SetCurrentDebugLocation(), llvm::IRBuilderBase::SetInsertPoint(), and llvm::BasicBlock::splitBasicBlock().

◆ FindAllMemoryUses() [1/2]

static bool FindAllMemoryUses (Instruction *I, SmallVectorImpl< std::pair< Use *, Type * > > &MemoryUses, const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)

Definition at line 5274 of file CodeGenPrepare.cpp.

References FindAllMemoryUses(), I, and TRI.

◆ FindAllMemoryUses() [2/2]

static bool FindAllMemoryUses (Instruction *I, SmallVectorImpl< std::pair< Use *, Type * > > &MemoryUses, SmallPtrSetImpl< Instruction * > &ConsideredInsts, const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, unsigned &SeenInsts)

Recursively walk all the uses of I until we find a memory use.

If we find an obviously non-foldable instruction, return true. Add accessed addresses and types to MemoryUses.
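The overall shape of that walk can be sketched as follows. This is a simplified, hypothetical version that only shows the traversal and the bail-out on too many users; the real function additionally consults TLI/TRI and profile data, and records the uses it finds in MemoryUses.

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/IR/Instructions.h"

  // Sketch: return true if the walk must give up (too many users, or a user we
  // cannot fold); otherwise keep descending through non-memory users.
  static bool walkAddressUses(llvm::Instruction *I,
                              llvm::SmallPtrSetImpl<llvm::Instruction *> &Considered,
                              unsigned &SeenInsts, unsigned MaxUsers) {
    if (!Considered.insert(I).second)
      return false;                      // Already visited this instruction.
    for (llvm::Use &U : I->uses()) {
      if (++SeenInsts > MaxUsers)
        return true;                     // Conservative bail-out on huge use lists.
      auto *UserI = llvm::dyn_cast<llvm::Instruction>(U.getUser());
      if (!UserI)
        return true;                     // Non-instruction user: give up.
      if (llvm::isa<llvm::LoadInst>(UserI) || llvm::isa<llvm::StoreInst>(UserI))
        continue;                        // Memory use found; the real code records it.
      if (walkAddressUses(UserI, Considered, SeenInsts, MaxUsers))
        return true;                     // Propagate failure from deeper users.
    }
    return false;
  }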

Definition at line 5199 of file CodeGenPrepare.cpp.

References FindAllMemoryUses(), llvm::StoreInst::getPointerOperandIndex(), llvm::AtomicCmpXchgInst::getPointerOperandIndex(), llvm::AtomicRMWInst::getPointerOperandIndex(), getType(), I, llvm::SmallPtrSetImpl< PtrType >::insert(), IsOperandAMemoryOperand(), MaxAddressUsersToScan, MightBeFoldableInst(), llvm::shouldOptimizeForSize(), and TRI.

Referenced by FindAllMemoryUses().

◆ foldFCmpToFPClassTest()

static bool foldFCmpToFPClassTest (CmpInst *Cmp, const TargetLowering &TLI, const DataLayout &DL)

◆ foldICmpWithDominatingICmp()

static bool foldICmpWithDominatingICmp (CmpInst *Cmp, const TargetLowering &TLI)

For pattern like:

    DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
    ...
  DomBB:
    ...
    br DomCond, TrueBB, CmpBB
  CmpBB: (with DomBB being the single predecessor)
    ...
    Cmp = icmp eq CmpOp0, CmpOp1
    ...

This would use two comparisons on targets where the lowering of icmp sgt/slt differs from the lowering of icmp eq (e.g. PowerPC). This function tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. After that, DomCond and Cmp can share the same comparison, saving one comparison.

Return true if any changes are made.
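A hypothetical C++ snippet whose IR tends to exhibit this pattern: the equality test compares the same operands as the dominating signed comparison, so rewriting it as a signed compare lets both branches reuse one comparison. Names and shapes here are illustrative assumptions, not taken from the pass.

  int classify(int a, int b) {
    if (a > b)      // DomCond = icmp sgt a, b
      return 1;
    if (a == b)     // Cmp = icmp eq a, b -- candidate for conversion to icmp slt/sgt
      return 0;
    return -1;
  }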

Definition at line 1847 of file CodeGenPrepare.cpp.

References assert(), EnableICMP_EQToICMP_ST, llvm::BasicBlock::getSinglePredecessor(), llvm::CmpInst::getSwappedPredicate(), llvm::BasicBlock::getTerminator(), llvm::CmpInst::ICMP_EQ, llvm::CmpInst::ICMP_SGT, llvm::CmpInst::ICMP_SLT, llvm::BranchInst::isConditional(), llvm::TargetLoweringBase::isEqualityCmpFoldedWithSignedCmp(), llvm_unreachable, llvm::PatternMatch::m_Br(), llvm::PatternMatch::m_ICmp(), llvm::PatternMatch::m_Specific(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::match(), and llvm::BranchInst::swapSuccessors().

◆ GEPSequentialConstIndexed()

static bool GEPSequentialConstIndexed (GetElementPtrInst *GEP)

Definition at line 8045 of file CodeGenPrepare.cpp.

References GEP, llvm::gep_type_begin(), and I.

Referenced by tryUnmergingGEPsAcrossIndirectBr().

◆ getGEPSmallConstantIntOffsetV()

static bool getGEPSmallConstantIntOffsetV (GetElementPtrInst *GEP, SmallVectorImpl< Value * > &OffsetV)

◆ getIVIncrement()

static std::optional< std::pair< Instruction *, Constant * > > getIVIncrement (const PHINode *PN, const LoopInfo *LI)

If given PN is an inductive variable with value IVInc coming from the backedge, and on each iteration it gets increased by Step, return pair <IVInc, Step>.

Otherwise, return std::nullopt.
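A simplified, hypothetical sketch of this matching, assuming the plain 'IVInc = add PN, Step' form on the latch edge (the real code delegates to matchIncrement(), which also accepts uadd.with.overflow-style increments):

  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/PatternMatch.h"
  #include <optional>
  #include <utility>

  static std::optional<std::pair<llvm::Instruction *, llvm::Constant *>>
  matchSimpleIVIncrement(const llvm::PHINode *PN, const llvm::LoopInfo *LI) {
    using namespace llvm::PatternMatch;
    const llvm::Loop *L = LI->getLoopFor(PN->getParent());
    if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
      return std::nullopt;                       // Not a header PHI of a loop with a latch.
    auto *IVInc = llvm::dyn_cast<llvm::Instruction>(
        PN->getIncomingValueForBlock(L->getLoopLatch()));
    llvm::Value *LHS;
    llvm::Constant *Step;
    if (IVInc && match(IVInc, m_Add(m_Value(LHS), m_Constant(Step))) && LHS == PN)
      return std::make_pair(IVInc, Step);        // <IVInc, Step> as described above.
    return std::nullopt;
  }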

Definition at line 1523 of file CodeGenPrepare.cpp.

References llvm::PHINode::getIncomingValueForBlock(), llvm::LoopInfoBase< BlockT, LoopT >::getLoopFor(), llvm::Instruction::getParent(), LHS, and matchIncrement().

Referenced by isIVIncrement().

◆ getTrueOrFalseValue()

static Value * getTrueOrFalseValue (SelectInst *SI, bool isTrue, const SmallPtrSet< const Instruction *, 2 > &Selects)

If isTrue is true, return the true value of SI, otherwise return false value of SI.

If the true/false value of SI is defined by any select instructions in Selects, look through the defining select instruction until the true/false value is not defined in Selects.
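A minimal sketch of that "look through a chain of selects" behaviour (a simplification under the same assumptions; the real helper also asserts on the shapes it expects):

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/IR/Instructions.h"

  static llvm::Value *lookThroughSelects(
      llvm::SelectInst *SI, bool isTrue,
      const llvm::SmallPtrSet<const llvm::Instruction *, 2> &Selects) {
    llvm::Value *V = isTrue ? SI->getTrueValue() : SI->getFalseValue();
    while (auto *Next = llvm::dyn_cast<llvm::SelectInst>(V)) {
      if (!Selects.count(Next))
        break;                 // Value is defined outside Selects: stop looking through.
      V = isTrue ? Next->getTrueValue() : Next->getFalseValue();
    }
    return V;
  }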

Definition at line 7008 of file CodeGenPrepare.cpp.

References assert(), and llvm::SmallPtrSetImpl< PtrType >::count().

◆ hasSameExtUse()

static bool hasSameExtUse (Value *Val, const TargetLowering &TLI)

◆ INITIALIZE_PASS_BEGIN()

INITIALIZE_PASS_BEGIN (CodeGenPrepareLegacyPass, DEBUG_TYPE, "Optimize for code generation", false, false)

◆ isExtractBitsCandidateUse()

static bool isExtractBitsCandidateUse (Instruction *User)

Check if the candidates could be combined with a shift instruction, which includes:

  1. Truncate instruction
  2. And instruction and the imm is a mask of the low bits: imm & (imm+1) == 0
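Written out as code, the low-bits mask test in item 2 is just:

  #include <cstdint>

  // Imm is a mask of the low bits exactly when adding 1 clears all of its set
  // bits, i.e. Imm has the form 0b0...01...1 (including 0 and all-ones).
  static bool isLowBitsMask(uint64_t Imm) {
    return (Imm & (Imm + 1)) == 0;
  }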

Definition at line 2082 of file CodeGenPrepare.cpp.

References llvm::User::getOperand().

Referenced by OptimizeExtractBits().

◆ isFormingBranchFromSelectProfitable()

static bool isFormingBranchFromSelectProfitable (const TargetTransformInfo *TTI, const TargetLowering *TLI, SelectInst *SI)

◆ isIntrinsicOrLFToBeTailCalled()

static bool isIntrinsicOrLFToBeTailCalled (const TargetLibraryInfo *TLInfo, const CallInst *CI)

◆ isIVIncrement()

static bool isIVIncrement (const Value *V, const LoopInfo *LI)

Definition at line 1538 of file CodeGenPrepare.cpp.

References getIVIncrement(), I, LHS, and matchIncrement().

◆ IsNonLocalValue()

static bool IsNonLocalValue (Value *V, BasicBlock *BB)

Return true if the specified values are defined in a different basic block than BB.

Definition at line 5420 of file CodeGenPrepare.cpp.

References I.

◆ IsOperandAMemoryOperand()

static bool IsOperandAMemoryOperand (CallInst *CI, InlineAsm *IA, Value *OpVal, const TargetLowering &TLI, const TargetRegisterInfo &TRI)

Check to see if all uses of OpVal by the specified inline asm call are due to memory operands.

If so, return true, otherwise return false.

Definition at line 5174 of file CodeGenPrepare.cpp.

References llvm::TargetLowering::C_Memory, llvm::TargetLowering::ComputeConstraintToUse(), F, llvm::Instruction::getFunction(), llvm::TargetLowering::ParseConstraints(), and TRI.

Referenced by FindAllMemoryUses().

◆ isPromotedInstructionLegal()

static bool isPromotedInstructionLegal (const TargetLowering &TLI, const DataLayout &DL, Value *Val)

Check whether or not Val is a legal instruction for TLI.

Note
Val is assumed to be the product of some type promotion. Therefore if Val has an undefined state in TLI, this is assumed to be legal, as the non-promoted value would have had the same state.

Definition at line 4357 of file CodeGenPrepare.cpp.

References DL, llvm::Instruction::getOpcode(), llvm::Value::getType(), llvm::TargetLoweringBase::getValueType(), llvm::TargetLoweringBase::InstructionOpcodeToISD(), and llvm::TargetLoweringBase::isOperationLegalOrCustom().

◆ matchIncrement()

bool matchIncrement (const Instruction *IVInc, Instruction *&LHS, Constant *&Step)

◆ matchUAddWithOverflowConstantEdgeCases()

static bool matchUAddWithOverflowConstantEdgeCases (CmpInst *Cmp, BinaryOperator *&Add)

◆ MightBeFoldableInst()

static bool MightBeFoldableInst (Instruction *I)

This is a little filter, which returns true if an addressing computation involving I might be folded into a load/store accessing it.

This doesn't need to be perfect, but needs to accept at least the set of instructions that MatchOperationAddr can.

Definition at line 4326 of file CodeGenPrepare.cpp.

References I.

Referenced by FindAllMemoryUses().

◆ optimizeBranch()

static bool optimizeBranch (BranchInst *Branch, const TargetLowering &TLI, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHugeFunc)

◆ OptimizeExtractBits()

static bool OptimizeExtractBits (BinaryOperator *ShiftI, ConstantInt *CI, const TargetLowering &TLI, const DataLayout &DL)

Sink the shift right instruction into user blocks if the uses could potentially be combined with this shift instruction and generate BitExtract instruction.

It will only be applied if the architecture supports BitExtract instruction. Here is an example:

    BB1:
      %x.extract.shift = lshr i64 %arg1, 32
    BB2:
      %x.extract.trunc = trunc i64 %x.extract.shift to i16
    ==>
    BB2:
      %x.extract.shift.1 = lshr i64 %arg1, 32
      %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16

CodeGen will recognize the pattern in BB2 and generate BitExtract instruction. Return true if any changes are made.

Only insert instructions in each block once.

Definition at line 2191 of file CodeGenPrepare.cpp.

References assert(), DL, llvm::BasicBlock::end(), llvm::Instruction::eraseFromParent(), llvm::Instruction::getDebugLoc(), llvm::BasicBlock::getFirstInsertionPt(), llvm::BinaryOperator::getOpcode(), llvm::User::getOperand(), llvm::Instruction::getParent(), llvm::Value::getType(), llvm::TargetLoweringBase::getValueType(), llvm::Instruction::insertBefore(), isExtractBitsCandidateUse(), llvm::TargetLoweringBase::isTypeLegal(), llvm::salvageDebugInfo(), llvm::Instruction::setDebugLoc(), SinkShiftAndTruncate(), llvm::Value::use_empty(), llvm::Value::user_begin(), and llvm::Value::user_end().

◆ OptimizeNoopCopyExpression()

static bool OptimizeNoopCopyExpression (CastInst *CI, const TargetLowering &TLI, const DataLayout &DL)

If the specified cast instruction is a noop copy (e.g.

it's casting from one pointer type to another, i32->i8 on PPC), sink it into user blocks to reduce the number of virtual registers that must be created and coalesced.

Return true if any changes are made.
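As a very rough, hypothetical approximation of the "noop copy" test (the actual implementation also consults getTypeAction()/getTypeToTransformTo() and free address-space casts): a cast whose source and destination types map to the same value type is a candidate for sinking.

  #include "llvm/CodeGen/TargetLowering.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instructions.h"

  static bool looksLikeNoopCast(const llvm::CastInst &CI,
                                const llvm::TargetLowering &TLI,
                                const llvm::DataLayout &DL) {
    llvm::EVT SrcVT = TLI.getValueType(DL, CI.getOperand(0)->getType());
    llvm::EVT DstVT = TLI.getValueType(DL, CI.getType());
    return SrcVT == DstVT;   // Same value type on both sides: effectively a copy.
  }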

Definition at line 1461 of file CodeGenPrepare.cpp.

References llvm::EVT::bitsLT(), DL, llvm::Value::getContext(), llvm::User::getOperand(), llvm::Value::getType(), llvm::TargetLoweringBase::getTypeAction(), llvm::TargetLoweringBase::getTypeToTransformTo(), llvm::TargetLoweringBase::getValueType(), llvm::TargetLoweringBase::isFreeAddrSpaceCast(), llvm::EVT::isInteger(), SinkCast(), and llvm::TargetLoweringBase::TypePromoteInteger.

◆ replaceAllUsesWith()

static void replaceAllUsesWith (Value *Old, Value *New, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)

Replace all old uses with new ones, and push the updated BBs into FreshBBs.
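A minimal sketch of this behaviour (a simplification, not the exact implementation): for huge functions, remember the blocks that contain Old's users so that later iterations revisit only fresh blocks.

  #include "llvm/ADT/SmallSet.h"
  #include "llvm/IR/Instructions.h"

  static void replaceAllUsesWithSketch(llvm::Value *Old, llvm::Value *New,
                                       llvm::SmallSet<llvm::BasicBlock *, 32> &FreshBBs,
                                       bool IsHuge) {
    if (IsHuge)
      for (llvm::User *U : Old->users())
        if (auto *I = llvm::dyn_cast<llvm::Instruction>(U))
          FreshBBs.insert(I->getParent());   // Blocks whose contents are about to change.
    Old->replaceAllUsesWith(New);
  }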

Definition at line 1112 of file CodeGenPrepare.cpp.

References llvm::SmallSet< T, N, C >::insert(), llvm::Value::replaceAllUsesWith(), and llvm::Value::user_begin().

Referenced by despeculateCountZeros(), and optimizeBranch().

◆ scaleWeights()

static void scaleWeights (uint64_t &NewTrue, uint64_t &NewFalse)

Scale down both weights to fit into uint32_t.
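One straightforward way to do this (a sketch, not necessarily the exact scheme used here) is to divide both weights by a common factor until the larger one fits in 32 bits, preserving their ratio:

  #include <cstdint>
  #include <limits>

  static void scaleWeightsSketch(uint64_t &NewTrue, uint64_t &NewFalse) {
    uint64_t Max = NewTrue > NewFalse ? NewTrue : NewFalse;
    if (Max <= std::numeric_limits<uint32_t>::max())
      return;                                        // Both already fit into uint32_t.
    uint64_t Scale = Max / std::numeric_limits<uint32_t>::max() + 1;
    NewTrue /= Scale;                                // Dividing both by the same factor
    NewFalse /= Scale;                               // keeps the branch probability intact.
  }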

Definition at line 8694 of file CodeGenPrepare.cpp.

◆ simplifyRelocatesOffABase()

static bool simplifyRelocatesOffABase (GCRelocateInst *RelocatedBase, const SmallVectorImpl< GCRelocateInst * > &Targets)

◆ sinkAndCmp0Expression()

static bool sinkAndCmp0Expression (Instruction *AndI, const TargetLowering &TLI, SetOfInstrs &InsertedInsts)

◆ SinkCast()

static bool SinkCast (CastInst *CI)

◆ sinkCmpExpression()

static bool sinkCmpExpression (CmpInst *Cmp, const TargetLowering &TLI)

Sink the given CmpInst into user blocks to reduce the number of virtual registers that must be created and coalesced.

This is a clear win except on targets with multiple condition code registers (PowerPC), where it might lose; some adjustment may be wanted there.

Return true if any changes are made.

Definition at line 1768 of file CodeGenPrepare.cpp.

References assert(), llvm::CmpInst::Create(), llvm::BasicBlock::end(), llvm::BasicBlock::getFirstInsertionPt(), llvm::TargetLoweringBase::hasMultipleConditionRegisters(), llvm::Instruction::insertBefore(), llvm::Instruction::setDebugLoc(), and llvm::TargetLoweringBase::useSoftFloat().

◆ sinkSelectOperand()

static bool sinkSelectOperand (const TargetTransformInfo *TTI, Value *V)

Check if V (an operand of a select instruction) is an expensive instruction that is only used once.

Definition at line 6954 of file CodeGenPrepare.cpp.

References I, llvm::TargetTransformInfo::isExpensiveToSpeculativelyExecute(), and llvm::isSafeToSpeculativelyExecute().

Referenced by isFormingBranchFromSelectProfitable().

◆ SinkShiftAndTruncate()

static bool SinkShiftAndTruncate (BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, DenseMap< BasicBlock *, BinaryOperator * > &InsertedShifts, const TargetLowering &TLI, const DataLayout &DL)

◆ splitMergedValStore()

static bool splitMergedValStore (StoreInst &SI, const DataLayout &DL, const TargetLowering &TLI)

For the instruction sequence of store below, F and I values are bundled together as an i64 value before being stored into memory.

Sometimes it is more efficient to generate separate stores for F and I, which can remove the bitwise instructions or sink them to colder places.

(store (or (zext (bitcast F to i32) to i64), (shl (zext I to i64), 32)), addr) --> (store F, addr) and (store I, addr+4)

Similarly, splitting for other merged stores can also be beneficial, like:
For pair of {i32, i32}, i64 store --> two i32 stores.
For pair of {i32, i16}, i64 store --> two i32 stores.
For pair of {i16, i16}, i32 store --> two i16 stores.
For pair of {i16, i8}, i32 store --> two i16 stores.
For pair of {i8, i8}, i16 store --> two i8 stores.

We allow each target to determine specifically which kind of splitting is supported.

The store patterns are commonly seen from the simple code snippet below if only std::make_pair(...) is SROA-transformed before being inlined into hoo:

    void goo(const std::pair<int, float> &);
    hoo() {
      ...
      goo(std::make_pair(tmp, ftmp));
      ...
    }

Although we already have similar splitting in DAG Combine, we duplicate it in CodeGenPrepare to catch the case in which pattern is across multiple BBs. The logic in DAG Combine is kept to catch case generated during code expansion.

Definition at line 7947 of file CodeGenPrepare.cpp.

References Addr, llvm::commonAlignment(), llvm::IRBuilderBase::CreateAlignedStore(), llvm::IRBuilderBase::CreateBitCast(), llvm::IRBuilderBase::CreateGEP(), llvm::IRBuilderBase::CreateZExtOrBitCast(), DL, ForceSplitStore, llvm::EVT::getEVT(), llvm::Type::getInt32Ty(), llvm::Type::getIntNTy(), llvm::Value::getType(), llvm::Type::isIntegerTy(), llvm::TargetLoweringBase::isMultiStoresCheaperThanBitsMerge(), llvm::Type::isScalableTy(), LValue, llvm::PatternMatch::m_c_Or(), llvm::PatternMatch::m_OneUse(), llvm::PatternMatch::m_Shl(), llvm::PatternMatch::m_SpecificInt(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::m_ZExt(), llvm::PatternMatch::match(), llvm::IRBuilderBase::SetInsertPoint(), and llvm::Upper.

◆ STATISTIC() [1/16]

STATISTIC ( NumAndsAdded  ,
"Number of and mask instructions added to form ext loads"   
)

◆ STATISTIC() [2/16]

STATISTIC ( NumAndUses  ,
"Number of uses of and mask instructions optimized"   
)

◆ STATISTIC() [3/16]

STATISTIC ( NumBlocksElim  ,
"Number of blocks eliminated"   
)

◆ STATISTIC() [4/16]

STATISTIC ( NumCastUses  ,
"Number of uses of Cast expressions replaced with uses " "of sunken Casts"   
)

◆ STATISTIC() [5/16]

STATISTIC ( NumCmpUses  ,
"Number of uses of Cmp expressions replaced with uses of " "sunken Cmps"   
)

◆ STATISTIC() [6/16]

STATISTIC ( NumDbgValueMoved  ,
"Number of debug value instructions moved"   
)

◆ STATISTIC() [7/16]

STATISTIC ( NumExtsMoved  ,
"Number of [s|z]ext instructions combined with loads"   
)

◆ STATISTIC() [8/16]

STATISTIC ( NumExtUses  ,
"Number of uses of [s|z]ext instructions optimized"   
)

◆ STATISTIC() [9/16]

STATISTIC ( NumGEPsElim  ,
"Number of GEPs converted to casts"   
)

◆ STATISTIC() [10/16]

STATISTIC ( NumMemoryInsts  ,
"Number of memory instructions whose address " "computations were sunk"   
)

◆ STATISTIC() [11/16]

STATISTIC ( NumMemoryInstsPhiCreated  ,
"Number of phis created when address " "computations were sunk to memory instructions"   
)

◆ STATISTIC() [12/16]

STATISTIC ( NumMemoryInstsSelectCreated  ,
"Number of select created when address " "computations were sunk to memory instructions"   
)

◆ STATISTIC() [13/16]

STATISTIC ( NumPHIsElim  ,
"Number of trivial PHIs eliminated"   
)

◆ STATISTIC() [14/16]

STATISTIC ( NumRetsDup  ,
"Number of return instructions duplicated"   
)

◆ STATISTIC() [15/16]

STATISTIC ( NumSelectsExpanded  ,
"Number of selects turned into branches"   
)

◆ STATISTIC() [16/16]

STATISTIC ( NumStoreExtractExposed  ,
"Number of store(extractelement) exposed"   
)

◆ swapICmpOperandsToExposeCSEOpportunities()

static bool swapICmpOperandsToExposeCSEOpportunities (CmpInst *Cmp)

Many architectures use the same instruction for both subtract and cmp.

Try to swap cmp operands to match subtract operations to allow for CSE.
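A hypothetical C++ fragment illustrating the motivation: when the compare is written with the operands in the opposite order from the subtraction, swapping the compare's operands (and its predicate) lets the backend reuse the flags produced by the subtract. Names here are illustrative only.

  int step(int a, int b) {
    int d = a - b;      // sub a, b
    if (b < a)          // becomes icmp sgt a, b after swapping: same operand order as the sub
      return d;
    return 0;
  }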

Definition at line 1917 of file CodeGenPrepare.cpp.

References llvm::Value::getType(), llvm::Type::isIntegerTy(), llvm::PatternMatch::m_Specific(), llvm::PatternMatch::m_Sub(), llvm::PatternMatch::match(), and llvm::Value::users().

◆ tryUnmergingGEPsAcrossIndirectBr()

static bool tryUnmergingGEPsAcrossIndirectBr (GetElementPtrInst *GEPI, const TargetTransformInfo *TTI)

Variable Documentation

◆ AddrSinkCombineBaseGV

static cl::opt< bool > AddrSinkCombineBaseGV ("addr-sink-combine-base-gv", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseGV field in Address sinking."))

◆ AddrSinkCombineBaseOffs

static cl::opt< bool > AddrSinkCombineBaseOffs ("addr-sink-combine-base-offs", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseOffs field in Address sinking."))

◆ AddrSinkCombineBaseReg

static cl::opt< bool > AddrSinkCombineBaseReg ("addr-sink-combine-base-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseReg field in Address sinking."))

◆ AddrSinkCombineScaledReg

static cl::opt< bool > AddrSinkCombineScaledReg ("addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of ScaledReg field in Address sinking."))

◆ AddrSinkNewPhis

static cl::opt< bool > AddrSinkNewPhis ("addr-sink-new-phis", cl::Hidden, cl::init(false), cl::desc("Allow creation of Phis in Address sinking."))

◆ AddrSinkNewSelects

static cl::opt< bool > AddrSinkNewSelects ("addr-sink-new-select", cl::Hidden, cl::init(true), cl::desc("Allow creation of selects in Address sinking."))

◆ AddrSinkUsingGEPs

static cl::opt< bool > AddrSinkUsingGEPs ("addr-sink-using-gep", cl::Hidden, cl::init(true), cl::desc("Address sinking in CGP using GEPs."))

◆ BBSectionsGuidedSectionPrefix

static cl::opt< bool > BBSectionsGuidedSectionPrefix ("bbsections-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use the basic-block-sections profile to determine the text " "section prefix for hot functions. Functions with " "basic-block-sections profile will be placed in `.text.hot` " "regardless of their FDO profile info. Other functions won't be " "impacted, i.e., their prefixes will be decided by FDO/sampleFDO " "profiles."))

◆ DEBUG_TYPE

DEBUG_TYPE

Definition at line 537 of file CodeGenPrepare.cpp.

◆ DisableBranchOpts

static cl::opt< bool > DisableBranchOpts ("disable-cgp-branch-opts", cl::Hidden, cl::init(false), cl::desc("Disable branch optimizations in CodeGenPrepare"))

◆ DisableComplexAddrModes

static cl::opt< bool > DisableComplexAddrModes ("disable-complex-addr-modes", cl::Hidden, cl::init(false), cl::desc("Disables combining addressing modes with different parts " "in optimizeMemoryInst."))

◆ DisableDeletePHIs

static cl::opt< bool > DisableDeletePHIs ("disable-cgp-delete-phis", cl::Hidden, cl::init(false), cl::desc("Disable elimination of dead PHI nodes."))

◆ DisableExtLdPromotion

static cl::opt< bool > DisableExtLdPromotion ("disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " "CodeGenPrepare"))

◆ DisableGCOpts

static cl::opt< bool > DisableGCOpts ("disable-cgp-gc-opts", cl::Hidden, cl::init(false), cl::desc("Disable GC optimizations in CodeGenPrepare"))

◆ DisablePreheaderProtect

static cl::opt< bool > DisablePreheaderProtect ("disable-preheader-prot", cl::Hidden, cl::init(false), cl::desc("Disable protection against removing loop preheaders"))

◆ DisableSelectToBranch

static cl::opt< bool > DisableSelectToBranch ("disable-cgp-select2branch", cl::Hidden, cl::init(false), cl::desc("Disable select to branch conversion."))

◆ DisableStoreExtract

static cl::opt< bool > DisableStoreExtract ("disable-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Disable store(extract) optimizations in CodeGenPrepare"))

◆ EnableAndCmpSinking

static cl::opt< bool > EnableAndCmpSinking ("enable-andcmp-sinking", cl::Hidden, cl::init(true), cl::desc("Enable sinkinig and/cmp into branches."))

◆ EnableGEPOffsetSplit

static cl::opt< bool > EnableGEPOffsetSplit ("cgp-split-large-offset-gep", cl::Hidden, cl::init(true), cl::desc("Enable splitting large offset of GEP."))

◆ EnableICMP_EQToICMP_ST

static cl::opt< bool > EnableICMP_EQToICMP_ST ("cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false), cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."))

◆ EnableTypePromotionMerge

static cl::opt< bool > EnableTypePromotionMerge ("cgp-type-promotion-merge", cl::Hidden, cl::desc("Enable merging of redundant sexts when one is dominating" " the other."), cl::init(true))

◆ false

Optimize for code false

Definition at line 538 of file CodeGenPrepare.cpp.

◆ ForceSplitStore

static cl::opt< bool > ForceSplitStore ("force-split-store", cl::Hidden, cl::init(false), cl::desc("Force store splitting no matter what the target query says."))

Referenced by splitMergedValStore().

◆ FreqRatioToSkipMerge

static cl::opt< uint64_t > FreqRatioToSkipMerge ("cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), cl::desc("Skip merging empty blocks if (frequency of empty block) / " "(frequency of destination block) is greater than this ratio"))

◆ generation

Optimize for code generation

Definition at line 538 of file CodeGenPrepare.cpp.

◆ HugeFuncThresholdInCGPP

static cl::opt< unsigned > HugeFuncThresholdInCGPP ("cgpp-huge-func", cl::init(10000), cl::Hidden, cl::desc("Least BB number of huge function."))

◆ MaxAddressUsersToScan

static cl::opt< unsigned > MaxAddressUsersToScan ("cgp-max-address-users-to-scan", cl::init(100), cl::Hidden, cl::desc("Max number of address users to look at"))

Referenced by FindAllMemoryUses().

◆ OptimizePhiTypes

static cl::opt< bool > OptimizePhiTypes ("cgp-optimize-phi-types", cl::Hidden, cl::init(true), cl::desc("Enable converting phi types in CodeGenPrepare"))

◆ ProfileGuidedSectionPrefix

static cl::opt< bool > ProfileGuidedSectionPrefix ("profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use profile info to add section prefix for hot/cold functions"))

◆ ProfileUnknownInSpecialSection

static cl::opt< bool > ProfileUnknownInSpecialSection ("profile-unknown-in-special-section", cl::Hidden, cl::desc("In profiling mode like sampleFDO, if a function doesn't have " "profile, we cannot tell the function is cold for sure because " "it may be a function newly added without ever being sampled. " "With the flag enabled, compiler can put such profile unknown " "functions into a special section, so runtime system can choose " "to handle it in a different way than .text section, to save " "RAM for example. "))

◆ StressExtLdPromotion

static cl::opt< bool > StressExtLdPromotion ("stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " "optimization in CodeGenPrepare"))

◆ StressStoreExtract

static cl::opt< bool > StressStoreExtract ("stress-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"))

◆ VerifyBFIUpdates

static cl::opt< bool > VerifyBFIUpdates ("cgp-verify-bfi-updates", cl::Hidden, cl::init(false), cl::desc("Enable BFI update verification for " "CodeGenPrepare."))