Source listing (excerpts) of LLVM's ItaniumDemangle.cpp: the Itanium-ABI demangling entry points and the ItaniumPartialDemangler implementation.
constexpr const char *itanium_demangle::FloatData<float>::spec;
constexpr const char *itanium_demangle::FloatData<double>::spec;
constexpr const char *itanium_demangle::FloatData<long double>::spec;
const char *itanium_demangle::parse_discriminator(const char *first,
                                                  const char *last) {
  // Parse but ignore a <discriminator>: "_ <digit>" or "__ <number> _".
  if (first != last) {
    if (*first == '_') {
      const char *t1 = first + 1;
      if (t1 != last && std::isdigit(*t1))
        first = t1 + 1;
      else if (t1 != last && *t1 == '_') {
        for (++t1; t1 != last && std::isdigit(*t1); ++t1)
          ;
        if (t1 != last && *t1 == '_')
          first = t1 + 1;
      }
    } else if (std::isdigit(*first)) {
      const char *t1 = first + 1;
      for (; t1 != last && std::isdigit(*t1); ++t1)
        ;
      if (t1 == last)
        first = last;
    }
  }
  return first;
}
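For orientation (an example constructed from the <discriminator> grammar, not taken from the file): a second local variable named s inside main is mangled as _ZZ4mainE1s_0, where the trailing _0 is the discriminator this routine consumes and discards; values of ten or more use the __<number>_ form instead.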
struct DumpVisitor {
  unsigned Depth = 0;
  bool PendingNewline = false;

  template <typename NodeT> static constexpr bool wantsNewline(const NodeT *) {
    return true;
  }
  static bool wantsNewline(NodeArray A) { return !A.empty(); }
  static constexpr bool wantsNewline(...) { return false; }

  template <typename... Ts> static bool anyWantNewline(Ts... Vs) {
    for (bool B : {wantsNewline(Vs)...})
      if (B)
        return true;
    return false;
  }

  void printStr(const char *S) { fprintf(stderr, "%s", S); }
  void print(StringView SV) {
    fprintf(stderr, "\"%.*s\"", (int)SV.size(), SV.begin());
  }
  void print(const Node *N) {
    if (N)
      N->visit(std::ref(*this));
    else
      printStr("<null>");
  }
  void print(NodeArray A) {
    ++Depth;
    printStr("{");
    bool First = true;
    for (const Node *N : A) {
      if (First)
        print(N);
      else
        printWithComma(N);
      First = false;
    }
    printStr("}");
    --Depth;
  }

  // Overload chosen when the argument is exactly 'bool', not merely
  // convertible to it.
  void print(bool B) { printStr(B ? "true" : "false"); }

  template <class T> std::enable_if_t<std::is_unsigned<T>::value> print(T N) {
    fprintf(stderr, "%llu", (unsigned long long)N);
  }

  template <class T> std::enable_if_t<std::is_signed<T>::value> print(T N) {
    fprintf(stderr, "%lld", (long long)N);
  }

  void print(ReferenceKind RK) {
    switch (RK) {
    case ReferenceKind::LValue:
      return printStr("ReferenceKind::LValue");
    case ReferenceKind::RValue:
      return printStr("ReferenceKind::RValue");
    }
  }
  void print(FunctionRefQual RQ) {
    switch (RQ) {
    case FunctionRefQual::FrefQualNone:
      return printStr("FunctionRefQual::FrefQualNone");
    case FunctionRefQual::FrefQualLValue:
      return printStr("FunctionRefQual::FrefQualLValue");
    case FunctionRefQual::FrefQualRValue:
      return printStr("FunctionRefQual::FrefQualRValue");
    }
  }
  void print(Qualifiers Qs) {
    if (!Qs)
      return printStr("QualNone");
    struct QualName {
      Qualifiers Q;
      const char *Name;
    } Names[] = {
        {QualConst, "QualConst"},
        {QualVolatile, "QualVolatile"},
        {QualRestrict, "QualRestrict"},
    };
    for (QualName Name : Names) {
      if (Qs & Name.Q) {
        printStr(Name.Name);
        Qs = Qualifiers(Qs & ~Name.Q);
        if (Qs)
          printStr(" | ");
      }
    }
  }
  void print(SpecialSubKind SSK) {
    switch (SSK) {
    case SpecialSubKind::allocator:
      return printStr("SpecialSubKind::allocator");
    case SpecialSubKind::basic_string:
      return printStr("SpecialSubKind::basic_string");
    case SpecialSubKind::string:
      return printStr("SpecialSubKind::string");
    case SpecialSubKind::istream:
      return printStr("SpecialSubKind::istream");
    case SpecialSubKind::ostream:
      return printStr("SpecialSubKind::ostream");
    case SpecialSubKind::iostream:
      return printStr("SpecialSubKind::iostream");
    }
  }
  void print(TemplateParamKind TPK) {
    switch (TPK) {
    case TemplateParamKind::Type:
      return printStr("TemplateParamKind::Type");
    case TemplateParamKind::NonType:
      return printStr("TemplateParamKind::NonType");
    case TemplateParamKind::Template:
      return printStr("TemplateParamKind::Template");
    }
  }
  void print(Node::Prec P) {
    switch (P) {
    case Node::Prec::Primary:
      return printStr("Node::Prec::Primary");
    case Node::Prec::Postfix:
      return printStr("Node::Prec::Postfix");
    case Node::Prec::Unary:
      return printStr("Node::Prec::Unary");
    case Node::Prec::Cast:
      return printStr("Node::Prec::Cast");
    case Node::Prec::PtrMem:
      return printStr("Node::Prec::PtrMem");
    case Node::Prec::Multiplicative:
      return printStr("Node::Prec::Multiplicative");
    case Node::Prec::Additive:
      return printStr("Node::Prec::Additive");
    case Node::Prec::Shift:
      return printStr("Node::Prec::Shift");
    case Node::Prec::Spaceship:
      return printStr("Node::Prec::Spaceship");
    case Node::Prec::Relational:
      return printStr("Node::Prec::Relational");
    case Node::Prec::Equality:
      return printStr("Node::Prec::Equality");
    case Node::Prec::And:
      return printStr("Node::Prec::And");
    case Node::Prec::Xor:
      return printStr("Node::Prec::Xor");
    case Node::Prec::Ior:
      return printStr("Node::Prec::Ior");
    case Node::Prec::AndIf:
      return printStr("Node::Prec::AndIf");
    case Node::Prec::OrIf:
      return printStr("Node::Prec::OrIf");
    case Node::Prec::Conditional:
      return printStr("Node::Prec::Conditional");
    case Node::Prec::Assign:
      return printStr("Node::Prec::Assign");
    case Node::Prec::Comma:
      return printStr("Node::Prec::Comma");
    case Node::Prec::Default:
      return printStr("Node::Prec::Default");
    }
  }

  void newLine() {
    printStr("\n");
    for (unsigned I = 0; I != Depth; ++I)
      printStr(" ");
    PendingNewline = false;
  }

  template <typename T> void printWithPendingNewline(T V) {
    print(V);
    if (wantsNewline(V))
      PendingNewline = true;
  }

  template <typename T> void printWithComma(T V) {
    if (PendingNewline || wantsNewline(V)) {
      printStr(",");
      newLine();
    } else {
      printStr(", ");
    }
    printWithPendingNewline(V);
  }

  struct CtorArgPrinter {
    DumpVisitor &Visitor;

    template <typename T, typename... Rest> void operator()(T V, Rest... Vs) {
      if (Visitor.anyWantNewline(V, Vs...))
        Visitor.newLine();
      Visitor.printWithPendingNewline(V);
      // Pack expansion with the comma operator: print each remaining
      // constructor argument in order.
      int PrintInOrder[] = {(Visitor.printWithComma(Vs), 0)..., 0};
      (void)PrintInOrder;
    }
  };

  template <typename NodeT> void operator()(const NodeT *Node) {
    Depth += 2;
    fprintf(stderr, "%s(", itanium_demangle::NodeKind<NodeT>::name());
    Node->match(CtorArgPrinter{*this});
    fprintf(stderr, ")");
    Depth -= 2;
  }

  void operator()(const ForwardTemplateReference *Node) {
    // A ForwardTemplateReference can form a cycle, so only print the
    // referenced node once; otherwise fall back to printing its index.
    Depth += 2;
    fprintf(stderr, "ForwardTemplateReference(");
    if (Node->Ref && !Node->Printing) {
      Node->Printing = true;
      CtorArgPrinter{*this}(Node->Ref);
      Node->Printing = false;
    } else {
      CtorArgPrinter{*this}(Node->Index);
    }
    fprintf(stderr, ")");
    Depth -= 2;
  }
};
class BumpPointerAllocator {
  struct BlockMeta {
    BlockMeta *Next;
    size_t Current;
  };

  static constexpr size_t AllocSize = 4096;
  static constexpr size_t UsableAllocSize = AllocSize - sizeof(BlockMeta);

  alignas(long double) char InitialBuffer[AllocSize];
  BlockMeta *BlockList = nullptr;

  void grow() {
    char *NewMeta = static_cast<char *>(std::malloc(AllocSize));
    if (NewMeta == nullptr)
      std::terminate();
    BlockList = new (NewMeta) BlockMeta{BlockList, 0};
  }

  void *allocateMassive(size_t NBytes) {
    NBytes += sizeof(BlockMeta);
    BlockMeta *NewMeta = reinterpret_cast<BlockMeta *>(std::malloc(NBytes));
    if (NewMeta == nullptr)
      std::terminate();
    BlockList->Next = new (NewMeta) BlockMeta{BlockList->Next, 0};
    return static_cast<void *>(NewMeta + 1);
  }

public:
  BumpPointerAllocator()
      : BlockList(new (InitialBuffer) BlockMeta{nullptr, 0}) {}

  void *allocate(size_t N) {
    N = (N + 15u) & ~15u; // Round up to a multiple of 16 bytes.
    if (N + BlockList->Current >= UsableAllocSize) {
      if (N > UsableAllocSize)
        return allocateMassive(N);
      grow();
    }
    BlockList->Current += N;
    return static_cast<void *>(reinterpret_cast<char *>(BlockList + 1) +
                               BlockList->Current - N);
  }

  void reset() {
    while (BlockList) {
      BlockMeta *Tmp = BlockList;
      BlockList = BlockList->Next;
      if (reinterpret_cast<char *>(Tmp) != InitialBuffer)
        std::free(Tmp);
    }
    BlockList = new (InitialBuffer) BlockMeta{nullptr, 0};
  }

  ~BumpPointerAllocator() { reset(); }
};
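A small standalone check, written for this listing rather than taken from the file, of the size rounding allocate() performs: (N + 15u) & ~15u rounds a request up to the next multiple of 16, so successive bumps stay 16-byte aligned.

#include <cassert>
#include <cstddef>

int main() {
  auto RoundUp16 = [](std::size_t N) { return (N + 15u) & ~15u; };
  assert(RoundUp16(1) == 16);  // minimum bump is one 16-byte slot
  assert(RoundUp16(16) == 16); // exact multiples are unchanged
  assert(RoundUp16(17) == 32); // anything over spills to the next slot
}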
class DefaultAllocator {
  BumpPointerAllocator Alloc;

public:
  void reset() { Alloc.reset(); }

  template <typename T, typename... Args> T *makeNode(Args &&...args) {
    return new (Alloc.allocate(sizeof(T))) T(std::forward<Args>(args)...);
  }

  void *allocateNodeArray(size_t sz) {
    return Alloc.allocate(sizeof(Node *) * sz);
  }
};
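The makeNode pattern above is ordinary placement new into bump storage; nodes are never individually destroyed, which works here because demangler nodes do not own resources that need destructors (an observation, not a guarantee stated in the file). A reduced sketch using the allocator defined above; SketchNode and makeNodeSketch are hypothetical names:

#include <new>
#include <utility>

struct SketchNode {
  int Value;
  explicit SketchNode(int V) : Value(V) {}
};

template <typename T, typename... Args>
T *makeNodeSketch(BumpPointerAllocator &Alloc, Args &&...args) {
  // Construct T directly in storage handed out by the bump allocator.
  return new (Alloc.allocate(sizeof(T))) T(std::forward<Args>(args)...);
}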
using Demangler = itanium_demangle::ManglingParser<DefaultAllocator>;
char *llvm::itaniumDemangle(const char *MangledName, char *Buf, size_t *N,
                            int *Status) {
  if (MangledName == nullptr || (Buf != nullptr && N == nullptr)) {
    if (Status)
      *Status = demangle_invalid_args;
    return nullptr;
  }

  int InternalStatus = demangle_success;
  Demangler Parser(MangledName, MangledName + std::strlen(MangledName));
  OutputBuffer OB;

  Node *AST = Parser.parse();

  if (AST == nullptr)
    InternalStatus = demangle_invalid_mangled_name;
  else if (!initializeOutputBuffer(Buf, N, OB, 1024))
    InternalStatus = demangle_memory_alloc_failure;
  else {
    assert(Parser.ForwardTemplateRefs.empty());
    AST->print(OB);
    OB += '\0';
    if (N != nullptr)
      *N = OB.getCurrentPosition();
    Buf = OB.getBuffer();
  }

  if (Status)
    *Status = InternalStatus;
  return InternalStatus == demangle_success ? Buf : nullptr;
}

ItaniumPartialDemangler::~ItaniumPartialDemangler() {
  delete static_cast<Demangler *>(Context);
}
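A usage sketch for the entry point above, not from the file, assuming the four-argument overload declared in llvm/Demangle/Demangle.h and linking against LLVMDemangle:

#include "llvm/Demangle/Demangle.h"
#include <cstdio>
#include <cstdlib>

int main() {
  int Status = 0;
  // Passing Buf == nullptr asks the demangler to malloc a result buffer.
  char *Demangled = llvm::itaniumDemangle("_Z3fooi", nullptr, nullptr, &Status);
  if (Status == 0 && Demangled)     // 0 is demangle_success
    std::printf("%s\n", Demangled); // prints "foo(int)"
  std::free(Demangled);
}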
// Demangle MangledName into an AST, storing it into this->RootNode.
bool ItaniumPartialDemangler::partialDemangle(const char *MangledName) {
  size_t Len = std::strlen(MangledName);
  Parser->reset(MangledName, MangledName + Len);
  RootNode = Parser->parse();
  return RootNode == nullptr; // true means the parse failed
}
static char *printNode(const Node *RootNode, char *Buf, size_t *N) {
  OutputBuffer OB;
  if (!initializeOutputBuffer(Buf, N, OB, 128))
    return nullptr;
  RootNode->print(OB);
  OB += '\0';
  if (N != nullptr)
    *N = OB.getCurrentPosition();
  return OB.getBuffer();
}
char *ItaniumPartialDemangler::getFunctionBaseName(char *Buf, size_t *N) const {
  if (!isFunction())
    return nullptr;

  const Node *Name = static_cast<const FunctionEncoding *>(RootNode)->getName();

  while (true) {
    switch (Name->getKind()) {
    case Node::KAbiTagAttr:
      Name = static_cast<const AbiTagAttr *>(Name)->Base;
      continue;
    case Node::KModuleEntity:
      Name = static_cast<const ModuleEntity *>(Name)->Name;
      continue;
    case Node::KNestedName:
      Name = static_cast<const NestedName *>(Name)->Name;
      continue;
    case Node::KLocalName:
      Name = static_cast<const LocalName *>(Name)->Entity;
      continue;
    case Node::KNameWithTemplateArgs:
      Name = static_cast<const NameWithTemplateArgs *>(Name)->Name;
      continue;
    default:
      return printNode(Name, Buf, N);
    }
  }
}

char *ItaniumPartialDemangler::getFunctionDeclContextName(char *Buf,
                                                          size_t *N) const {
  if (!isFunction())
    return nullptr;
  const Node *Name = static_cast<const FunctionEncoding *>(RootNode)->getName();

  OutputBuffer OB;
  if (!initializeOutputBuffer(Buf, N, OB, 128))
    return nullptr;

 KeepGoingLocalFunction:
  while (true) {
    if (Name->getKind() == Node::KAbiTagAttr) {
      Name = static_cast<const AbiTagAttr *>(Name)->Base;
      continue;
    }
    if (Name->getKind() == Node::KNameWithTemplateArgs) {
      Name = static_cast<const NameWithTemplateArgs *>(Name)->Name;
      continue;
    }
    break;
  }

  if (Name->getKind() == Node::KModuleEntity)
    Name = static_cast<const ModuleEntity *>(Name)->Name;

  switch (Name->getKind()) {
  case Node::KNestedName:
    static_cast<const NestedName *>(Name)->Qual->print(OB);
    break;
  case Node::KLocalName: {
    auto *LN = static_cast<const LocalName *>(Name);
    LN->Encoding->print(OB);
    OB += "::";
    Name = LN->Entity;
    goto KeepGoingLocalFunction;
  }
  default:
    break;
  }

  OB += '\0';
  if (N != nullptr)
    *N = OB.getCurrentPosition();
  return OB.getBuffer();
}
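Concretely, for a symbol like _ZN1N1fEv (that is, N::f()), getFunctionBaseName prints just "f" while getFunctionDeclContextName prints the enclosing "N"; both walk the name node, peeling off template arguments and ABI tags along the way.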
char *ItaniumPartialDemangler::getFunctionParameters(char *Buf,
                                                     size_t *N) const {
  if (!isFunction())
    return nullptr;
  NodeArray Params = static_cast<FunctionEncoding *>(RootNode)->getParams();

  OutputBuffer OB;
  if (!initializeOutputBuffer(Buf, N, OB, 128))
    return nullptr;

  OB += '(';
  Params.printWithComma(OB);
  OB += ')';
  OB += '\0';
  if (N != nullptr)
    *N = OB.getCurrentPosition();
  return OB.getBuffer();
}
char *ItaniumPartialDemangler::getFunctionReturnType(char *Buf,
                                                     size_t *N) const {
  if (!isFunction())
    return nullptr;

  OutputBuffer OB;
  if (!initializeOutputBuffer(Buf, N, OB, 128))
    return nullptr;

  if (const Node *Ret =
          static_cast<const FunctionEncoding *>(RootNode)->getReturnType())
    Ret->print(OB);

  OB += '\0';
  if (N != nullptr)
    *N = OB.getCurrentPosition();
  return OB.getBuffer();
}
// Just print the entire mangled name into Buf.
char *ItaniumPartialDemangler::finishDemangle(char *Buf, size_t *N) const {
  assert(RootNode != nullptr && "must call partialDemangle()");
  return printNode(static_cast<Node *>(RootNode), Buf, N);
}
bool ItaniumPartialDemangler::isCtorOrDtor() const {
  assert(RootNode != nullptr && "must call partialDemangle()");
  const Node *N = static_cast<const Node *>(RootNode);
  while (N) {
    switch (N->getKind()) {
    default:
      return false;
    case Node::KCtorDtorName:
      return true;
    case Node::KAbiTagAttr:
      N = static_cast<const AbiTagAttr *>(N)->Base;
      break;
    case Node::KFunctionEncoding:
      N = static_cast<const FunctionEncoding *>(N)->getName();
      break;
    case Node::KLocalName:
      N = static_cast<const LocalName *>(N)->Entity;
      break;
    case Node::KNameWithTemplateArgs:
      N = static_cast<const NameWithTemplateArgs *>(N)->Name;
      break;
    case Node::KNestedName:
      N = static_cast<const NestedName *>(N)->Name;
      break;
    case Node::KModuleEntity:
      N = static_cast<const ModuleEntity *>(N)->Name;
      break;
    }
  }
  return false;
}
bool ItaniumPartialDemangler::isFunction() const {
  assert(RootNode != nullptr && "must call partialDemangle()");
  return static_cast<const Node *>(RootNode)->getKind() ==
         Node::KFunctionEncoding;
}
bool ItaniumPartialDemangler::isSpecialName() const {
  assert(RootNode != nullptr && "must call partialDemangle()");
  auto K = static_cast<const Node *>(RootNode)->getKind();
  return K == Node::KSpecialName || K == Node::KCtorVtableSpecialName;
}
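Putting the pieces together, an end-to-end sketch of the partial-demangling API reconstructed above; again an illustration rather than code from the file, assuming llvm/Demangle/Demangle.h and LLVMDemangle:

#include "llvm/Demangle/Demangle.h"
#include <cstdio>
#include <cstdlib>

int main() {
  llvm::ItaniumPartialDemangler D;
  // Note the inverted convention: partialDemangle returns true on *failure*.
  if (D.partialDemangle("_ZN9SomeClass6methodEv"))
    return 1;
  if (D.isFunction()) {
    char *Base = D.getFunctionBaseName(nullptr, nullptr); // malloc'd result
    char *Ctx = D.getFunctionDeclContextName(nullptr, nullptr);
    std::printf("%s in %s\n", Base, Ctx); // "method in SomeClass"
    std::free(Base);
    std::free(Ctx);
  }
}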
Symbols documented on this page:

itanium_demangle::ManglingParser<DefaultAllocator> Demangler
char *itaniumDemangle(const char *mangled_name, char *buf, size_t *n, int *status)
ItaniumPartialDemangler()
~ItaniumPartialDemangler()
ItaniumPartialDemangler &operator=(ItaniumPartialDemangler &&Other)
bool partialDemangle(const char *MangledName)
    Demangle into an AST.
char *finishDemangle(char *Buf, size_t *N) const
    Just print the entire mangled name into Buf.
char *getFunctionName(char *Buf, size_t *N) const
    Get the entire name of this function.
char *getFunctionBaseName(char *Buf, size_t *N) const
    Get the base name of a function.
char *getFunctionDeclContextName(char *Buf, size_t *N) const
    Get the context name for a function.
char *getFunctionParameters(char *Buf, size_t *N) const
    Get the parameters for this function.
char *getFunctionReturnType(char *Buf, size_t *N) const
bool isFunction() const
    If this symbol describes a function.
bool isData() const
    If this symbol describes a variable.
bool isCtorOrDtor() const
    If this symbol describes a constructor or destructor.
bool isSpecialName() const
    If this symbol is a <special-name>.
bool hasFunctionQualifiers() const
    If this function has any cv or reference qualifiers.
static char *printNode(const Node *RootNode, char *Buf, size_t *N)
const char *parse_discriminator(const char *first, const char *last)
bool initializeOutputBuffer(char *Buf, size_t *N, OutputBuffer &OB, size_t InitSize)
void print(OutputBuffer &OB) const
void printWithComma(OutputBuffer &OB) const
Node::Prec
    Operator precedence for expression nodes.
demangle_invalid_mangled_name, demangle_memory_alloc_failure
    Status codes reported through itaniumDemangle's *status out-parameter.