  if (Volatile)
    R << " Volatile: " << NV("StoreVolatile", true) << ".";
  if (Atomic)
    R << " Atomic: " << NV("StoreAtomic", true) << ".";
  // Values that are false are streamed after setExtraArgs(), so they are kept
  // as remark arguments but left out of the printed message.
  if (!Volatile || !Atomic)
    R.setExtraArgs();
  if (!Volatile)
    R << " Volatile: " << NV("StoreVolatile", false) << ".";
  if (!Atomic)
    R << " Atomic: " << NV("StoreAtomic", false) << ".";

  if (!SizeInBits || *SizeInBits % 8 != 0)
    return None;
  return *SizeInBits / 8;
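The two lines above convert a bit count into a byte count; a few made-up inputs illustrate the contract (the enclosing helper's name and signature are not shown in this listing):

  //   32 bits  -> 4 bytes
  //   33 bits  -> no result (not a whole number of bytes)
  //   no size  -> no result
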
  bool Volatile = SI.isVolatile();
  bool Atomic = SI.isAtomic();
  int64_t Size = DL.getTypeStoreSize(SI.getOperand(0)->getType());

  R << "Store inserted by -ftrivial-auto-var-init.\nStore size: "
    << NV("StoreSize", Size) << " bytes.";
  inspectDst(SI.getOperand(1), R);

62 "AutoInitUnknownInstruction", &
I)
63 <<
"Initialization inserted by -ftrivial-auto-var-init.");
  switch (II.getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::memcpy_element_unordered_atomic:
  case Intrinsic::memmove_element_unordered_atomic:
  case Intrinsic::memset_element_unordered_atomic:
    // Each case records the equivalent libcall name in CallTo; the
    // element-unordered-atomic variants also mark the operation Atomic.
    break;
  default:
    return inspectUnknown(II);
  }

  inspectCallee(StringRef(CallTo), /*KnownLibCall=*/true, R);

  auto *CIVolatile = dyn_cast<ConstantInt>(II.getOperand(3));
  // An access cannot be both atomic and volatile, so only the non-atomic
  // intrinsics are checked for a volatile argument.
  bool Volatile = !Atomic && CIVolatile && CIVolatile->getZExtValue();

  Function *F = CI.getCalledFunction();
  if (!F)
    return inspectUnknown(CI);

  LibFunc LF;
  bool KnownLibCall = TLI.getLibFunc(*F, LF) && TLI.has(LF);
  inspectCallee(F, KnownLibCall, R);
  inspectKnownLibCall(CI, LF, R);

template <typename FTy>
void AutoInitRemark::inspectCallee(FTy F, bool KnownLibCall,
                                   OptimizationRemarkMissed &R) {
  if (!KnownLibCall)
    R << NV("UnknownLibCall", "unknown") << " function ";
  R << NV("Callee", F) << " inserted by -ftrivial-auto-var-init.";
}
  if (auto *Len = dyn_cast<ConstantInt>(V)) {
    uint64_t Size = Len->getZExtValue();
    R << " Memory operation size: " << NV("StoreSize", Size) << " bytes.";
  }

void AutoInitRemark::inspectVariable(const Value *V,
                                     SmallVectorImpl<VariableInfo> &Result) {
  // Prefer debug info: the llvm.dbg intrinsics found through FindDbgAddrUses
  // carry the variable's source name and its size.
  bool FoundDI = false;
  // ...
      VariableInfo Var{DILV->getName(), DISize};
      if (!Var.isEmpty()) {
        Result.push_back(std::move(Var));
        FoundDI = true;
      }
  // ...
  // With no debug info, fall back to the alloca itself, if there is one.
  const auto *AI = dyn_cast<AllocaInst>(V);
  // ...

void AutoInitRemark::inspectDst(Value *Dst, OptimizationRemarkMissed &R) {
  // Find the underlying variables behind Dst so the remark can name them.
  SmallVector<const Value *, 2> Objects;
  getUnderlyingObjects(Dst, Objects);
  SmallVector<VariableInfo, 2> VIs;
  for (const Value *V : Objects)
    inspectVariable(V, VIs);

  if (VIs.empty())
    return;

  R << "\nVariables: ";
  for (unsigned i = 0; i < VIs.size(); ++i) {
    const VariableInfo &VI = VIs[i];
    assert(!VI.isEmpty() && "No extra content to display.");
    if (VI.Name)
      R << NV("VarName", *VI.Name);
    else
      R << NV("VarName", "<unknown>");
    if (VI.Size)
      R << " (" << NV("VarSize", *VI.Size) << " bytes)";
  }
}