//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is within the array, compute the index of the requested
      // element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
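
// Worked example (editorial sketch, not part of the upstream source): for an
// aggregate %T = { i32, [2 x i64], float }, the scalar leaves are numbered
// i32 -> 0, the two i64s -> 1 and 2, and float -> 3, so a caller asking for
// the float (extractvalue index path {2}) could write, assuming TTy is the
// StructType for %T:
//
//   unsigned Idx[] = {2};
//   unsigned Linear = ComputeLinearIndex(TTy, Idx, Idx + 1, 0); // == 3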

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
static void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                            Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                            SmallVectorImpl<EVT> *MemVTs,
                            SmallVectorImpl<TypeSize> *Offsets,
                            TypeSize StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      TypeSize EltOffset = SL ? SL->getElementOffset(EI - EB)
                              : TypeSize::get(0, StartingOffset.isScalable());
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    TypeSize EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
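
// Worked example (editorial sketch, not part of the upstream source): for
// %S = { i32, [2 x float] } with a typical DataLayout, the public overload
// below would append the EVTs [i32, f32, f32] and the byte offsets [0, 4, 8],
// assuming STy, TLI, and DL are in scope:
//
//   SmallVector<EVT, 4> VTs;
//   SmallVector<TypeSize, 4> Offs;
//   ComputeValueVTs(TLI, DL, STy, VTs, &Offs, TypeSize::getFixed(0));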

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           TypeSize StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, Offsets, Offset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  if (FixedOffsets) {
    SmallVector<TypeSize, 4> Offsets;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, &Offsets, Offset);
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getFixedValue());
  } else {
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, nullptr, Offset);
  }
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, Offsets, Offset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  if (FixedOffsets) {
    SmallVector<TypeSize, 4> Offsets;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, &Offsets, Offset);
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getFixedValue());
  } else {
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, nullptr, Offset);
  }
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
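
// Unit note and sketch (editorial addition, not in the upstream source):
// unlike ComputeValueVTs above, which reports byte offsets, this function
// records bit offsets (StartingOffset * 8). For %S = { i32, i64 } under a
// typical 64-bit DataLayout, assuming STy and DL are in scope:
//
//   SmallVector<LLT, 4> Tys;
//   SmallVector<uint64_t, 4> BitOffs;
//   computeValueLLTs(DL, *STy, Tys, &BitOffs); // Tys = [s32, s64],
//                                              // BitOffs = [0, 64]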

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr instructions with all-zero indices.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
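
// Illustrative IR (editorial sketch, not part of the upstream source): in
//
//   %p = call ptr @f()
//   %i = ptrtoint ptr %p to i64   ; assuming 64-bit pointers
//   ret i64 %i
//
// getNoopInput on %i looks through the non-truncating ptrtoint and returns
// %p, since no bits are added or discarded on the way to the return.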

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by
  // the call. In the simple case with no "returned" attribute, the hope is
  // actually that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search is blocked immediately and the
  // loop is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look
  // through extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
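
// Sketch of the trunc case (editorial addition, not in the upstream source):
// for
//
//   %x = tail call i64 @g()
//   %t = trunc i64 %x to i32
//   ret i32 %t
//
// slotOnlyDiscardsData(%t, %x, ...) traces %t back through the trunc to %x
// (when the target's allowTruncateForTailCall(i64, i32) holds) and succeeds
// if AllowDifferingSizes is true: the ret merely discards the top 32 bits
// the call provided.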

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}
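
// Traversal trace (editorial addition, not part of the upstream source):
// starting from the example type {[0 x i64], {{}, i32, {}}, i32} with the
// iterator at the empty [0 x i64] leaf (Path = [0]), successive calls visit
// the empty struct at Path = [1, 0], the i32 at [1, 1], the empty struct at
// [1, 2], and the trailing i32 at [2], then return false once the type is
// exhausted.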

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If the call will have a chain, make sure no other instruction that will
  // have a chain interposes between it and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
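
// Positional example (editorial sketch, not part of the upstream source):
//
//   %r = tail call i32 @callee()
//   ret i32 %r
//
// satisfies the block-level checks above, whereas a store or other
// side-effecting, non-debug instruction between the call and the ret would
// make this function return false.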

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // The following attributes are completely benign as far as calling
  // convention goes, and they shouldn't affect whether the call is a tail
  // call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be expanded
  // as memcpy in libc, which returns the first argument. On other platforms,
  // like arm-none-eabi, memcpy may be expanded as a library call without a
  // return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing is actually returned, so it doesn't matter what the callee puts
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
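
// Example of the memcpy special case above (editorial sketch, not part of
// the upstream source): lowering
//
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
//   ret ptr %dst
//
// can still be treated as eligible when the target's RTLIB::MEMCPY libcall
// is plain "memcpy", because the libc routine returns its first argument
// (%dst), which is exactly the value being returned here.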

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}