23 case Instruction::Add:
24 case Instruction::Sub:
25 case Instruction::Mul:
26 case Instruction::Shl: {
27 auto *OBO = cast<OverflowingBinaryOperator>(
this);
28 return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
30 case Instruction::UDiv:
31 case Instruction::SDiv:
32 case Instruction::AShr:
33 case Instruction::LShr:
34 return cast<PossiblyExactOperator>(
this)->isExact();
35 case Instruction::GetElementPtr: {
36 auto *
GEP = cast<GEPOperator>(
this);
38 return GEP->isInBounds() ||
GEP->getInRangeIndex() != std::nullopt;
41 if (
const auto *
FP = dyn_cast<FPMathOperator>(
this))
42 return FP->hasNoNaNs() ||
FP->hasNoInfs();
50 auto *
I = dyn_cast<Instruction>(
this);
51 return I &&
I->hasPoisonGeneratingMetadata();
55 if (
auto *
I = dyn_cast<GetElementPtrInst>(
this))
56 return I->getSourceElementType();
57 return cast<GetElementPtrConstantExpr>(
this)->getSourceElementType();
61 if (
auto *
I = dyn_cast<GetElementPtrInst>(
this))
62 return I->getResultElementType();
63 return cast<GetElementPtrConstantExpr>(
this)->getResultElementType();
74 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
76 if (
StructType *STy = GTI.getStructTypeOrNull()) {
80 assert(GTI.isSequential() &&
"should be sequencial");
84 Offset =
DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
96 "The offset bit width does not match DL specification.");
105 bool UsedExternalAnalysis =
false;
110 if (!UsedExternalAnalysis) {
115 bool Overflow =
false;
126 SourceType,
Index.begin());
128 for (
auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
132 Value *V = GTI.getOperand();
135 if (
auto ConstOffset = dyn_cast<ConstantInt>(V)) {
136 if (ConstOffset->isZero())
144 unsigned ElementIdx = ConstOffset->getZExtValue();
147 if (!AccumulateOffset(
153 if (!AccumulateOffset(ConstOffset->getValue(),
154 DL.getTypeAllocSize(GTI.getIndexedType())))
161 if (!ExternalAnalysis || STy || ScalableType)
164 if (!ExternalAnalysis(*V, AnalysisIndex))
166 UsedExternalAnalysis =
true;
167 if (!AccumulateOffset(AnalysisIndex,
168 DL.getTypeAllocSize(GTI.getIndexedType())))
177 APInt &ConstantOffset)
const {
179 "The offset bit width does not match DL specification.");
184 ConstantOffset +=
Index * IndexedSize;
190 bool ScalableType = GTI.getIndexedType()->isScalableTy();
192 Value *V = GTI.getOperand();
195 if (
auto ConstOffset = dyn_cast<ConstantInt>(V)) {
196 if (ConstOffset->isZero())
207 unsigned ElementIdx = ConstOffset->getZExtValue();
214 CollectConstantOffset(ConstOffset->getValue(),
215 DL.getTypeAllocSize(GTI.getIndexedType()));
219 if (STy || ScalableType)
225 if (!IndexedSize.
isZero()) {
227 VariableOffsets[V] += IndexedSize;
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt smul_ov(const APInt &RHS, bool &Overflow) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
A parsed version of the target data layout string and methods for querying it.
bool noSignedZeros() const
bool allowReciprocal() const
void print(raw_ostream &O) const
Print fast-math flags to O.
bool allowReassoc() const
Flag queries.
bool allowContract() const
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
Collect the offset of this GEP as a map of Values to their associated APInt multipliers,...
Type * getSourceElementType() const
Type * getResultElementType() const
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset, function_ref< bool(Value &, APInt &)> ExternalAnalysis=nullptr) const
Accumulate the constant address offset of this GEP if possible.
Align getMaxPreservedAlignment(const DataLayout &DL) const
Compute the maximum alignment that this GEP is guaranteed to preserve.
unsigned getPointerAddressSpace() const
Method to return the address space of the pointer operand.
This class implements a map that also provides access to all stored values in a deterministic order.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasPoisonGeneratingFlags() const
Return true if this operator has flags which may cause this operator to evaluate to poison despite ha...
bool hasPoisonGeneratingFlagsOrMetadata() const
Return true if this operator has poison-generating flags or metadata.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
iterator_range< value_op_iterator > operand_values()
LLVM Value Representation.
static constexpr uint64_t MaximumAlignment
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
This class implements an extremely fast bulk output stream that can only output to a stream.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
gep_type_iterator gep_type_end(const User *GEP)
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
constexpr unsigned BitWidth
gep_type_iterator gep_type_begin(const User *GEP)
This struct is a compact representation of a valid (non-zero power of two) alignment.