LLVM  13.0.0git
Functions
Loads.cpp File Reference
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
Include dependency graph for Loads.cpp:

Go to the source code of this file.

Functions

static bool isAligned (const Value *Base, const APInt &Offset, Align Alignment, const DataLayout &DL)
 
static bool isDereferenceableAndAlignedPointer (const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI, SmallPtrSetImpl< const Value * > &Visited, unsigned MaxDepth)
 Test if V is always a pointer to allocated and suitably aligned memory for a simple load or store. More...
 
static bool AreEquivalentAddressValues (const Value *A, const Value *B)
 Test if A and B will obviously have the same value. More...
 
static bool areNonOverlapSameBaseLoadAndStore (const Value *LoadPtr, Type *LoadTy, const Value *StorePtr, Type *StoreTy, const DataLayout &DL)
 
static Value * getAvailableLoadStore (Instruction *Inst, const Value *Ptr, Type *AccessTy, bool AtLeastAtomic, const DataLayout &DL, bool *IsLoadCSE)
 

Function Documentation

◆ AreEquivalentAddressValues()

static bool AreEquivalentAddressValues ( const Value *A,
const Value *B 
)
static

Test if A and B will obviously have the same value.

This includes recognizing that t0 and t1 will have the same value in code like this:

%t0 = getelementptr @a, 0, 3
store i32 0, i32* %t0
%t1 = getelementptr @a, 0, 3
%t2 = load i32* %t1

Definition at line 253 of file Loads.cpp.

References B.

Referenced by getAvailableLoadStore(), and llvm::isSafeToLoadUnconditionally().

◆ areNonOverlapSameBaseLoadAndStore()

static bool areNonOverlapSameBaseLoadAndStore ( const Value *LoadPtr,
Type *LoadTy,
const Value *StorePtr,
Type *StoreTy,
const DataLayout &DL 
)
static

◆ getAvailableLoadStore()

static Value * getAvailableLoadStore ( Instruction *Inst,
const Value *Ptr,
Type *AccessTy,
bool  AtLeastAtomic,
const DataLayout &DL,
bool *  IsLoadCSE 
)
static

◆ isAligned()

static bool isAligned ( const Value *Base,
const APInt &Offset,
Align  Alignment,
const DataLayout &DL 
)
static

◆ isDereferenceableAndAlignedPointer()

static bool isDereferenceableAndAlignedPointer ( const Value *V,
Align  Alignment,
const APInt &Size,
const DataLayout &DL,
const Instruction *CtxI,
const DominatorTree *DT,
const TargetLibraryInfo *TLI,
SmallPtrSetImpl< const Value * > &  Visited,
unsigned  MaxDepth 
)
static
store
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM ID Predecessors according to mbb< bb27, 0x8b0a7c0 > Note ADDri is not a two address instruction its result reg1037 is an operand of the PHI node in bb76 and its operand reg1039 is the result of the PHI node We should treat it as a two address code and make sure the ADDri is scheduled after any node that reads reg1039 Use info(i.e. register scavenger) to assign it a free register to allow reuse the collector could move the objects and invalidate the derived pointer This is bad enough in the first but safe points can crop up unpredictably **array_addr i32 n y store obj obj **nth_el If the i64 division is lowered to a then a safe point array and nth_el no longer point into the correct object The fix for this is to copy address calculations so that dependent pointers are never live across safe point boundaries But the loads cannot be copied like this if there was an intervening store
Definition: README.txt:133
a
=0.0 ? 0.0 :(a > 0.0 ? 1.0 :-1.0) a
Definition: README.txt:489
i32
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM ID Predecessors according to mbb< bb27, 0x8b0a7c0 > Note ADDri is not a two address instruction its result reg1037 is an operand of the PHI node in bb76 and its operand reg1039 is the result of the PHI node We should treat it as a two address code and make sure the ADDri is scheduled after any node that reads reg1039 Use info(i.e. register scavenger) to assign it a free register to allow reuse the collector could move the objects and invalidate the derived pointer This is bad enough in the first but safe points can crop up unpredictably **array_addr i32
Definition: README.txt:122
load
LLVM currently emits rax rax movq rax rax ret It could narrow the loads and stores to emit rax rax movq rax rax ret The trouble is that there is a TokenFactor between the store and the load
Definition: README.txt:1531
t1
<%struct.bf ** > define void t1() nounwind ssp
Definition: README.txt:1497