42#define DEBUG_TYPE "loadstore-opt"
46using namespace MIPatternMatch;
48STATISTIC(NumStoresMerged,
"Number of stores merged");
67 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
90 Info.IsIndexSignExt =
false;
96 Info.Offset = RHSCst->Value.getSExtValue();
100 Info.IndexReg = PtrAddRHS;
101 Info.IsIndexSignExt =
false;
109 auto *LdSt1 = dyn_cast<GLoadStore>(&MI1);
110 auto *LdSt2 = dyn_cast<GLoadStore>(&MI2);
111 if (!LdSt1 || !LdSt2)
135 IsAlias = !((int64_t)Size1.
getValue() <= PtrDiff);
142 IsAlias = !((PtrDiff + (int64_t)Size2.
getValue()) <= 0);
154 if (!Base0Def || !Base1Def)
158 if (Base0Def->getOpcode() != Base1Def->getOpcode())
161 if (Base0Def->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
165 if (Base0Def != Base1Def &&
175 if (Base0Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
176 auto GV0 = Base0Def->getOperand(1).getGlobal();
177 auto GV1 = Base1Def->getOperand(1).getGlobal();
192 struct MemUseCharacteristics {
201 auto getCharacteristics =
203 if (
const auto *LS = dyn_cast<GLoadStore>(
MI)) {
209 BaseReg = LS->getPointerReg();
214 return {LS->isVolatile(), LS->isAtomic(), BaseReg,
226 MemUseCharacteristics MUC0 = getCharacteristics(&
MI),
227 MUC1 = getCharacteristics(&
Other);
230 if (MUC0.BasePtr.isValid() && MUC0.BasePtr == MUC1.BasePtr &&
231 MUC0.Offset == MUC1.Offset)
235 if (MUC0.IsVolatile && MUC1.IsVolatile)
240 if (MUC0.IsAtomic && MUC1.IsAtomic)
245 if (MUC0.MMO && MUC1.MMO) {
246 if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
247 (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
253 if ((MUC0.NumBytes.isScalable() && MUC0.Offset != 0) ||
254 (MUC1.NumBytes.isScalable() && MUC1.Offset != 0))
257 const bool BothNotScalable =
258 !MUC0.NumBytes.isScalable() && !MUC1.NumBytes.isScalable();
263 if (BothNotScalable &&
268 if (!MUC0.MMO || !MUC1.MMO)
272 int64_t SrcValOffset0 = MUC0.MMO->getOffset();
273 int64_t SrcValOffset1 = MUC1.MMO->getOffset();
276 if (AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() && Size0.
hasValue() &&
279 int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
290 MemoryLocation(MUC0.MMO->getValue(), Loc0, MUC0.MMO->getAAInfo()),
291 MemoryLocation(MUC1.MMO->getValue(), Loc1, MUC1.MMO->getAAInfo())))
302 return MI.hasUnmodeledSideEffects() ||
MI.hasOrderedMemoryRef();
308 assert(StoresToMerge.
size() > 1 &&
"Expected multiple stores to merge");
309 LLT OrigTy = MRI->
getType(StoresToMerge[0]->getValueReg());
310 LLT PtrTy = MRI->
getType(StoresToMerge[0]->getPointerReg());
313 initializeStoreMergeTargetInfo(AS);
314 const auto &LegalSizes = LegalStoreSizes[AS];
317 for (
auto *StoreMI : StoresToMerge)
322 bool AnyMerged =
false;
327 unsigned MergeSizeBits;
328 for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
332 if (LegalSizes.size() > MergeSizeBits && LegalSizes[MergeSizeBits] &&
340 unsigned NumStoresToMerge = MergeSizeBits / OrigTy.
getSizeInBits();
343 StoresToMerge.begin(), StoresToMerge.begin() + NumStoresToMerge);
344 AnyMerged |= doSingleStoreMerge(SingleMergeStores);
345 StoresToMerge.erase(StoresToMerge.begin(),
346 StoresToMerge.begin() + NumStoresToMerge);
347 }
while (StoresToMerge.size() > 1);
351bool LoadStoreOpt::isLegalOrBeforeLegalizer(
const LegalityQuery &Query,
357 return IsPreLegalizer ||
Action == LegalizeAction::Legal;
367 GStore *FirstStore = Stores[0];
368 const unsigned NumStores = Stores.
size();
385 for (
auto *Store : Stores) {
389 ConstantVals.
clear();
398 if (ConstantVals.
empty()) {
407 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {WideValueTy}}, *MF))
420 <<
" stores into merged store: " << *NewStore);
422 NumStoresMerged += Stores.size();
429 R <<
"Merged " <<
NV(
"NumMerged", Stores.size()) <<
" stores of "
431 <<
" bytes into a single store of "
436 for (
auto *
MI : Stores)
437 InstsToErase.insert(
MI);
441bool LoadStoreOpt::processMergeCandidate(StoreMergeCandidate &
C) {
442 if (
C.Stores.size() < 2) {
447 LLVM_DEBUG(
dbgs() <<
"Checking store merge candidate with " <<
C.Stores.size()
448 <<
" stores, starting with " << *
C.Stores[0]);
462 auto DoesStoreAliasWithPotential = [&](
unsigned Idx,
GStore &CheckStore) {
463 for (
auto AliasInfo :
reverse(
C.PotentialAliases)) {
465 unsigned PreCheckedIdx = AliasInfo.second;
466 if (
static_cast<unsigned>(
Idx) < PreCheckedIdx) {
484 for (
int StoreIdx =
C.Stores.size() - 1; StoreIdx >= 0; --StoreIdx) {
485 auto *CheckStore =
C.Stores[StoreIdx];
486 if (DoesStoreAliasWithPotential(StoreIdx, *CheckStore))
492 <<
" stores remaining after alias checks. Merging...\n");
496 if (StoresToMerge.
size() < 2)
498 return mergeStores(StoresToMerge);
502 StoreMergeCandidate &
C) {
503 if (
C.Stores.empty())
506 return instMayAlias(MI, *OtherMI, *MRI, AA);
510void LoadStoreOpt::StoreMergeCandidate::addPotentialAlias(
MachineInstr &
MI) {
511 PotentialAliases.emplace_back(std::make_pair(&
MI, Stores.size() - 1));
514bool LoadStoreOpt::addStoreToCandidate(
GStore &StoreMI,
515 StoreMergeCandidate &
C) {
539 if (
C.Stores.empty()) {
545 C.BasePtr = StoreBase;
546 C.CurrentLowestOffset = StoreOffCst;
547 C.Stores.emplace_back(&StoreMI);
548 LLVM_DEBUG(
dbgs() <<
"Starting a new merge candidate group with: "
564 if (
C.BasePtr != StoreBase)
571 C.Stores.emplace_back(&StoreMI);
578 bool Changed =
false;
580 StoreMergeCandidate Candidate;
582 if (InstsToErase.contains(&
MI))
585 if (
auto *StoreMI = dyn_cast<GStore>(&
MI)) {
588 if (!addStoreToCandidate(*StoreMI, Candidate)) {
591 if (operationAliasesWithCandidate(*StoreMI, Candidate)) {
592 Changed |= processMergeCandidate(Candidate);
595 Candidate.addPotentialAlias(*StoreMI);
601 if (Candidate.Stores.empty())
606 Changed |= processMergeCandidate(Candidate);
607 Candidate.Stores.clear();
611 if (!
MI.mayLoadOrStore())
614 if (operationAliasesWithCandidate(
MI, Candidate)) {
617 Changed |= processMergeCandidate(Candidate);
623 Candidate.addPotentialAlias(
MI);
627 Changed |= processMergeCandidate(Candidate);
630 for (
auto *
MI : InstsToErase)
631 MI->eraseFromParent();
632 InstsToErase.clear();
642static std::optional<int64_t>
660 if (!SrcVal.
isValid() || TruncVal == SrcVal) {
668 unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
669 if (ShiftAmt % NarrowBits != 0)
671 const unsigned Offset = ShiftAmt / NarrowBits;
673 if (SrcVal.
isValid() && FoundSrcVal != SrcVal)
677 SrcVal = FoundSrcVal;
678 else if (
MRI.getType(SrcVal) !=
MRI.getType(FoundSrcVal))
705bool LoadStoreOpt::mergeTruncStore(
GStore &StoreMI,
733 auto &LastStore = StoreMI;
738 if (!
mi_match(LastStore.getPointerReg(), *MRI,
740 BaseReg = LastStore.getPointerReg();
744 GStore *LowestIdxStore = &LastStore;
745 int64_t LowestIdxOffset = LastOffset;
757 const unsigned NumStoresRequired =
761 OffsetMap[*LowestShiftAmt] = LastOffset;
764 const int MaxInstsToCheck = 10;
765 int NumInstsChecked = 0;
766 for (
auto II = ++LastStore.getReverseIterator();
767 II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
771 if ((NewStore = dyn_cast<GStore>(&*II))) {
774 }
else if (II->isLoadFoldBarrier() || II->mayLoad()) {
788 if (BaseReg != NewBaseReg)
792 if (!ShiftByteOffset)
794 if (MemOffset < LowestIdxOffset) {
795 LowestIdxOffset = MemOffset;
796 LowestIdxStore = NewStore;
801 if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
802 OffsetMap[*ShiftByteOffset] !=
INT64_MAX)
804 OffsetMap[*ShiftByteOffset] = MemOffset;
809 if (FoundStores.
size() == NumStoresRequired)
813 if (FoundStores.
size() != NumStoresRequired) {
814 if (FoundStores.
size() == 1)
822 unsigned NumStoresFound = FoundStores.
size();
824 const auto &
DL = LastStore.getMF()->getDataLayout();
825 auto &
C = LastStore.getMF()->getFunction().getContext();
830 if (!Allowed || !
Fast)
836 auto checkOffsets = [&](
bool MatchLittleEndian) {
837 if (MatchLittleEndian) {
838 for (
unsigned i = 0; i != NumStoresFound; ++i)
839 if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
842 for (
unsigned i = 0, j = NumStoresFound - 1; i != NumStoresFound;
844 if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
851 bool NeedBswap =
false;
852 bool NeedRotate =
false;
853 if (!checkOffsets(
DL.isLittleEndian())) {
855 if (NarrowBits == 8 && checkOffsets(
DL.isBigEndian()))
857 else if (NumStoresFound == 2 && checkOffsets(
DL.isBigEndian()))
864 !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}, *MF))
867 !isLegalOrBeforeLegalizer(
868 {TargetOpcode::G_ROTR, {WideStoreTy, WideStoreTy}}, *MF))
873 if (WideStoreTy != MRI->
getType(WideSrcVal))
878 }
else if (NeedRotate) {
880 "Unexpected type for rotate");
892 for (
auto *ST : FoundStores) {
893 ST->eraseFromParent();
900 bool Changed =
false;
905 if (
auto *StoreMI = dyn_cast<GStore>(&
MI))
908 for (
auto *StoreMI : Stores) {
909 if (DeletedStores.
count(StoreMI))
911 if (mergeTruncStore(*StoreMI, DeletedStores))
918 bool Changed =
false;
920 Changed |= mergeBlockStores(BB);
921 Changed |= mergeTruncStoresBlock(BB);
926 for (
auto &BB : MF) {
937void LoadStoreOpt::initializeStoreMergeTargetInfo(
unsigned AddrSpace) {
942 if (LegalStoreSizes.count(AddrSpace)) {
943 assert(LegalStoreSizes[AddrSpace].
any());
949 const auto &LI = *MF->getSubtarget().getLegalizerInfo();
963 LegalSizes.set(
Size);
965 assert(LegalSizes.any() &&
"Expected some store sizes to be legal!");
966 LegalStoreSizes[AddrSpace] = LegalSizes;
979 bool Changed =
false;
980 Changed |= mergeFunctionStores(MF);
982 LegalStoreSizes.clear();
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Performs the initial survey of the specified function
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic operations.
Interface for Targets to specify which operations they can successfully select and how the others should be expanded most efficiently.
Generic memory optimizations
const unsigned MaxStoreSizeToForm
static std::optional< int64_t > getTruncStoreByteOffset(GStore &Store, Register &SrcVal, MachineRegisterInfo &MRI)
Check if the store Store is a truncstore that can be merged.
static bool isInstHardMergeHazard(MachineInstr &MI)
Returns true if the instruction creates an unavoidable hazard that forces a boundary between store merge candidates.
Implement a low-level type suitable for MachineInstr level instruction selection.
Contains matchers for matching SSA Machine Instructions.
This file provides utility analysis objects describing memory locations.
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metrics from passes.
#define STATISTIC(VARNAME, DESC)
This file describes how to lower LLVM code to machine code.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
A trivial helper function to check to see if the specified pointers are no-alias.
Class for arbitrary precision integers.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original locations into a single location.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
Register getValueReg() const
Get the stored value register.
Module * getParent()
Get the module that this global value is contained inside of...
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr unsigned getAddressSpace() const
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
LegalizeActionStep getAction(const LegalityQuery &Query) const
Determine what action should be taken to legalize the described instruction.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformation or analysis.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
TypeSize getValue() const
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
reverse_iterator rbegin()
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of passes that operate on the MachineFunction representation.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
MachineInstrBuilder buildRotateRight(const DstOp &Dst, const SrcOp &Src, const SrcOp &Amt)
Build and insert Dst = G_ROTR Src, Amt.
void setInstr(MachineInstr &MI)
Set the insertion point to before MI.
MachineInstrBuilder buildBSwap(const DstOp &Dst, const SrcOp &Src0)
Build and insert Dst = G_BSWAP Src0.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
void setInstrAndDebugLoc(MachineInstr &MI)
Set the insertion point to before MI, and set the debug loc to MI's loc.
void setDebugLoc(const DebugLoc &DL)
Set the debug location to DL for all the next build instructions.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
void setMF(MachineFunction &MF)
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
const MachineBasicBlock * getParent() const
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MachinePointerInfo & getPointerInfo() const
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Representation for a specific memory location.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small size specializations of SmallPtrSet.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and alignment.
virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, const MachineFunction &MF) const
Returns if it's reasonable to merge stores to MemVT size.
virtual const LegalizerInfo * getLegalizerInfo() const
virtual const TargetLowering * getTargetLowering() const
The instances of the Type class are immutable: once they are created, they are never changed.
constexpr ScalarTy getFixedValue() const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool any(E Val)
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool aliasIsKnownForLoadStore(const MachineInstr &MI1, const MachineInstr &MI2, bool &IsAlias, MachineRegisterInfo &MRI)
Compute whether or not a memory access at MI1 aliases with an access at MI2.
BaseIndexOffset getPointerInfo(Register Ptr, MachineRegisterInfo &MRI)
Returns a BaseIndexOffset which describes the pointer in Ptr.
bool instMayAlias(const MachineInstr &MI, const MachineInstr &Other, MachineRegisterInfo &MRI, AliasAnalysis *AA)
Returns true if the instruction MI may alias Other.
@ Legal
The operation is expected to be selectable directly by the target, and no transformation is necessary...
@ Unsupported
This operation is completely unsupported on the target.
operand_type_match m_Reg()
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ASHR, false > m_GAShr(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_PTR_ADD, false > m_GPtrAdd(const LHS &L, const RHS &R)
Or< Preds... > m_any_of(Preds &&... preds)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
UnaryOp_match< SrcTy, TargetOpcode::G_TRUNC > m_GTrunc(const SrcTy &Src)
ManagedStatic< cl::opt< FnT >, OptCreatorT > Action
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
EVT getApproximateEVTForLLT(LLT Ty, const DataLayout &DL, LLVMContext &Ctx)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
LLT getLLTForType(Type &Ty, const DataLayout &DL)
Construct a low-level type based on an LLVM type.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have other effects.
Implement std::hash so that hash_code can be used in STL containers.
Helper struct to store a base, index and offset that forms an address.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
LegalizeAction Action
The action to take or the final answer.