Go to the documentation of this file.
34 const unsigned *Indices,
35 const unsigned *IndicesEnd,
38 if (Indices && Indices == IndicesEnd)
42 if (
StructType *STy = dyn_cast<StructType>(Ty)) {
45 if (Indices && *Indices ==
I.index())
49 assert(!Indices &&
"Unexpected out of bound");
53 else if (
ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
54 Type *EltTy = ATy->getElementType();
55 unsigned NumElts = ATy->getNumElements();
59 assert(*Indices < NumElts &&
"Unexpected out of bound");
62 CurIndex += EltLinearOffset* *Indices;
65 CurIndex += EltLinearOffset*NumElts;
85 if (
StructType *STy = dyn_cast<StructType>(Ty)) {
92 EE = STy->element_end();
97 StartingOffset + EltOffset);
102 if (
ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
103 Type *EltTy = ATy->getElementType();
104 uint64_t EltSize =
DL.getTypeAllocSize(EltTy).getFixedValue();
105 for (
unsigned i = 0,
e = ATy->getNumElements();
i !=
e; ++
i)
107 StartingOffset +
i * EltSize);
118 Offsets->push_back(StartingOffset);
134 if (
StructType *STy = dyn_cast<StructType>(&Ty)) {
139 for (
unsigned I = 0,
E = STy->getNumElements();
I !=
E; ++
I) {
142 StartingOffset + EltOffset);
147 if (
ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
148 Type *EltTy = ATy->getElementType();
149 uint64_t EltSize =
DL.getTypeAllocSize(EltTy).getFixedValue();
150 for (
unsigned i = 0,
e = ATy->getNumElements();
i !=
e; ++
i)
152 StartingOffset +
i * EltSize);
161 Offsets->push_back(StartingOffset * 8);
170 if (Var && Var->
getName() ==
"llvm.eh.catch.all.value") {
172 "The EH catch-all value must have an initializer");
174 GV = dyn_cast<GlobalValue>(
Init);
175 if (!GV) V = cast<ConstantPointerNull>(
Init);
178 assert((GV || isa<ConstantPointerNull>(V)) &&
179 "TypeInfo must be a global variable or NULL");
268 (isa<VectorType>(
T1) && isa<VectorType>(T2) &&
292 if (!
I ||
I->getNumOperands() == 0)
return V;
293 const Value *NoopInput =
nullptr;
296 if (isa<BitCastInst>(
I)) {
300 }
else if (isa<GetElementPtrInst>(
I)) {
302 if (cast<GetElementPtrInst>(
I)->hasAllZeroIndices())
304 }
else if (isa<IntToPtrInst>(
I)) {
308 if (!isa<VectorType>(
I->getType()) &&
309 DL.getPointerSizeInBits() ==
310 cast<IntegerType>(
Op->getType())->getBitWidth())
312 }
else if (isa<PtrToIntInst>(
I)) {
316 if (!isa<VectorType>(
I->getType()) &&
317 DL.getPointerSizeInBits() ==
318 cast<IntegerType>(
I->getType())->getBitWidth())
320 }
else if (isa<TruncInst>(
I) &&
323 I->getType()->getPrimitiveSizeInBits().getFixedSize());
325 }
else if (
auto *CB = dyn_cast<CallBase>(
I)) {
326 const Value *ReturnedOp = CB->getReturnedArgOperand();
328 NoopInput = ReturnedOp;
329 }
else if (
const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
332 if (ValLoc.size() >= InsertLoc.
size() &&
337 ValLoc.
resize(ValLoc.size() - InsertLoc.
size());
338 NoopInput = IVI->getInsertedValueOperand();
367 bool AllowDifferingSizes,
375 unsigned BitsRequired = UINT_MAX;
380 if (isa<UndefValue>(RetVal))
387 unsigned BitsProvided = UINT_MAX;
392 if (
CallVal != RetVal || CallIndices != RetIndices)
399 if (BitsProvided < BitsRequired ||
400 (!AllowDifferingSizes && BitsProvided != BitsRequired))
410 return Idx < AT->getNumElements();
412 return Idx < cast<StructType>(
T)->getNumElements();
436 while (!Path.empty() && !
indexReallyValid(SubTypes.back(), Path.back() + 1)) {
454 SubTypes.push_back(DeeperType);
479 SubTypes.push_back(Next);
508 assert(!Path.empty() &&
"found a leaf but didn't set the path?");
535 if (!
Ret && ((!
TM.Options.GuaranteedTailCallOpt &&
538 !isa<UnreachableInst>(
Term)))
549 if (BBI->isDebugOrPseudoInst())
554 if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
555 II->getIntrinsicID() == Intrinsic::assume ||
556 II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
558 if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
565 F, &Call,
Ret, *
TM.getSubtargetImpl(*F)->getTargetLowering());
571 bool *AllowDifferingSizes) {
574 bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
577 AttrBuilder CallerAttrs(
F->getContext(),
F->getAttributes().getRetAttrs());
579 cast<CallInst>(
I)->getAttributes().getRetAttrs());
583 for (
const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
584 Attribute::DereferenceableOrNull, Attribute::NoAlias,
585 Attribute::NonNull, Attribute::NoUndef}) {
590 if (CallerAttrs.
contains(Attribute::ZExt)) {
591 if (!CalleeAttrs.
contains(Attribute::ZExt))
597 }
else if (CallerAttrs.
contains(Attribute::SExt)) {
598 if (!CalleeAttrs.
contains(Attribute::SExt))
616 if (
I->use_empty()) {
624 return CallerAttrs == CalleeAttrs;
630 assert(A &&
B &&
"Expected non-null inputs!");
632 auto *BitCastIn = dyn_cast<BitCastInst>(
B);
637 if (!A->getType()->isPointerTy() || !
B->getType()->isPointerTy())
640 return A == BitCastIn->getOperand(0);
649 if (!
Ret ||
Ret->getNumOperands() == 0)
return true;
653 if (isa<UndefValue>(
Ret->getOperand(0)))
return true;
656 bool AllowDifferingSizes;
666 const CallInst *Call = cast<CallInst>(
I);
667 if (
Function *
F = Call->getCalledFunction()) {
671 (IID == Intrinsic::memmove &&
673 (IID == Intrinsic::memset &&
675 (RetVal == Call->getArgOperand(0) ||
718 AllowDifferingSizes, TLI,
719 F->getParent()->getDataLayout()))
732 while (!Worklist.empty()) {
739 auto P = EHScopeMembership.
insert(std::make_pair(Visiting, EHScope));
743 assert(
P.first->second == EHScope &&
"MBB is part of two scopes!");
762 return EHScopeMembership;
775 EHScopeBlocks.push_back(&
MBB);
777 SEHCatchPads.push_back(&
MBB);
779 UnreachableBlocks.push_back(&
MBB);
793 CatchRetSuccessors.push_back(
798 if (EHScopeBlocks.empty())
799 return EHScopeMembership;
813 for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
817 return EHScopeMembership;
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
This is an optimization pass for GlobalISel generic memory operations.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
Return a value (possibly void), from a function.
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
A parsed version of the target data layout string in and methods for querying it.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
bool isSafeToSpeculativelyExecute(const Value *V, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
const Function * getParent() const
Return the enclosing method, or null if none.
bool isPointerTy() const
True if this is an instance of PointerType.
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
virtual const TargetInstrInfo * getInstrInfo() const
Type::subtype_iterator element_iterator
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are pairs (A,B) such that A is the 0-based ...
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
static bool indexReallyValid(Type *T, unsigned Idx)
For an aggregate type, determine whether a given index is within bounds or not.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
@ ICMP_SGT
signed greater than
static bool isPointerBitcastEqualTo(const Value *A, const Value *B)
Check whether B is a bitcast of a pointer type to another pointer type, which is equal to A.
The instances of the Type class are immutable: once they are created, they are never changed.
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
@ ICMP_SLE
signed less or equal
LLVM_NODISCARD T pop_back_val()
Class to represent array types.
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
bool hasInitializer() const
Definitions have initializers, declarations don't.
LLVM Basic Block Representation.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
@ FCMP_ULT
1 1 0 0 True if unordered or less than
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
const MachineBasicBlock & front() const
TargetInstrInfo - Interface to description of machine instruction set.
@ Tail
Tail - This calling convention attempts to make calls as fast as possible while guaranteeing that tail...
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
static bool advanceToNextLeafType(SmallVectorImpl< Type * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Move the given iterators to the next leaf type in depth first traversal.
@ ICMP_ULE
unsigned less or equal
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal, SmallVectorImpl< unsigned > &RetIndices, SmallVectorImpl< unsigned > &CallIndices, bool AllowDifferingSizes, const TargetLoweringBase &TLI, const DataLayout &DL)
Return true if this scalar return value only has bits discarded on its path from the "tail call" to t...
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
static const Value * getNoopInput(const Value *V, SmallVectorImpl< unsigned > &ValLoc, unsigned &DataBits, const TargetLoweringBase &TLI, const DataLayout &DL)
Look through operations that will be free to find the earliest source of this value.
const HexagonInstrInfo * TII
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
reverse_iterator rbegin() const
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
reverse_iterator rend() const
bool isEHScopeReturnBlock() const
Convenience function that returns true if the block ends in an EH scope return instruction.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
static bool nextRealType(SmallVectorImpl< Type * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Set the iterator data-structures to the next non-empty, non-aggregate subtype.
static bool isNoopBitcast(Type *T1, Type *T2, const TargetLoweringBase &TLI)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI)
Test if given that the input instruction is in the tail call position if the return type or any attri...
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Primary interface to the complete machine description for the target machine.
<%struct.s * > cast struct s *S to sbyte *< sbyte * > sbyte uint cast struct s *agg result to sbyte *< sbyte * > sbyte uint cast struct s *memtmp to sbyte *< sbyte * > sbyte uint ret void llc ends up issuing two memcpy or custom lower memcpy(of small size) to be ldmia/stmia. I think option 2 is better but the current register allocator cannot allocate a chunk of registers at a time. A feasible temporary solution is to use specific physical registers at the lowering time for small(<
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isVoidTy() const
Return true if this is 'void'.
bool contains(Attribute::AttrKind A) const
Return true if the builder has the specified attribute.
bool attributesPermitTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI, bool *AllowDifferingSizes=nullptr)
Test if given that the input instruction is in the tail call position, if there is an attribute misma...
static void collectEHScopeMembers(DenseMap< const MachineBasicBlock *, int > &EHScopeMembership, int EHScope, const MachineBasicBlock *MBB)
@ ICMP_SLT
signed less than
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
bool isEHScopeEntry() const
Returns true if this is the entry block of an EH scope, i.e., the block that used to have a catchpad ...
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Class to represent struct types.
iterator_range< succ_iterator > successors()
bool isEHPad() const
Returns true if the block is a landing pad.
StringRef - Represent a constant reference to a string, i.e.
MachineBasicBlock MachineBasicBlock::iterator MBBI
@ ICMP_ULT
unsigned less than
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Type * getType() const
All values are typed, get the type of this value.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
StringRef getName() const
Return a constant reference to the value's name.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
void computeValueLLTs(const DataLayout &DL, Type &Ty, SmallVectorImpl< LLT > &ValueTys, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that represent all the individua...
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Function & getFunction()
Return the LLVM function that this machine code represents.
uint64_t getElementOffset(unsigned Idx) const
@ ICMP_SGE
signed greater or equal
Constant * getPersonalityFn() const
Get the personality function associated with this function.
A wrapper class for inspecting calls to intrinsic functions.
@ ICMP_UGT
unsigned greater than
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
size_t size() const
size - Get the array size.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
static bool firstRealType(Type *Next, SmallVectorImpl< Type * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Find the first non-empty, scalar-like type in Next and setup the iterator components.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
const char LLVMTargetMachineRef TM
This class represents a function call, abstracting a target machine's calling convention.
bool isAggregateType() const
Return true if the type is an aggregate type.
DenseMap< const MachineBasicBlock *, int > getEHScopeMembership(const MachineFunction &MF)
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
This instruction inserts a struct field of array element value into an aggregate value.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
LLVM Value Representation.
@ SwiftTail
SwiftTail - This follows the Swift calling convention in how arguments are passed but guarantees tail...
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
InstListType::const_iterator const_iterator
@ FCMP_UEQ
1 0 0 1 True if unordered or equal