// Doxygen source-listing header (extraction residue): LLVM 23.0.0git,
// LowerTypeTests.cpp — "Go to the documentation of this file."
1//===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass lowers type metadata and calls to the llvm.type.test intrinsic.
10// It also ensures that globals are properly laid out for the
11// llvm.icall.branch.funnel intrinsic.
12// See http://llvm.org/docs/TypeMetadata.html for more information.
13//
14//===----------------------------------------------------------------------===//
15
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SetVector.h"
26#include "llvm/ADT/Statistic.h"
27#include "llvm/ADT/StringRef.h"
34#include "llvm/IR/Attributes.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/Constant.h"
37#include "llvm/IR/Constants.h"
38#include "llvm/IR/DIBuilder.h"
39#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/Function.h"
42#include "llvm/IR/GlobalAlias.h"
44#include "llvm/IR/GlobalValue.h"
46#include "llvm/IR/IRBuilder.h"
47#include "llvm/IR/InlineAsm.h"
48#include "llvm/IR/Instruction.h"
51#include "llvm/IR/Intrinsics.h"
52#include "llvm/IR/LLVMContext.h"
53#include "llvm/IR/MDBuilder.h"
54#include "llvm/IR/Metadata.h"
55#include "llvm/IR/Module.h"
58#include "llvm/IR/Operator.h"
59#include "llvm/IR/PassManager.h"
62#include "llvm/IR/Type.h"
63#include "llvm/IR/Use.h"
64#include "llvm/IR/User.h"
65#include "llvm/IR/Value.h"
69#include "llvm/Support/Debug.h"
70#include "llvm/Support/Error.h"
79#include "llvm/Transforms/IPO.h"
82#include <algorithm>
83#include <cassert>
84#include <cstdint>
85#include <set>
86#include <string>
87#include <system_error>
88#include <utility>
89#include <vector>
90
91using namespace llvm;
92using namespace lowertypetests;
93
94#define DEBUG_TYPE "lowertypetests"
95
// Pass-wide statistics, reported by -stats.
STATISTIC(ByteArraySizeBits, "Byte array size in bits");
STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");

// NOTE(review): the `static cl::opt<...> Name(` opening line of each of the
// five option definitions below (and the clEnumValN openings inside the
// summary-action option) are missing from this extraction; only the argument
// lists survive unchanged.
    "lowertypetests-avoid-reuse",
    cl::desc("Try to avoid reuse of byte array addresses using aliases"),
    cl::Hidden, cl::init(true));

    "lowertypetests-summary-action",
    cl::desc("What to do with the summary when running this pass"),
    cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
               "Import typeid resolutions from summary and globals"),
               "Export typeid resolutions to summary and globals")),
    cl::Hidden);

    "lowertypetests-read-summary",
    cl::desc("Read summary from given YAML file before running pass"),
    cl::Hidden);

    "lowertypetests-write-summary",
    cl::desc("Write summary to given YAML file after running pass"),
    cl::Hidden);

// FIXME: Remove in clang 24.
    "lowertypetests-jump-table-debug-info", cl::init(true), cl::Hidden,
    cl::desc("Enable debug info generation for jump tables"));
131
  // NOTE(review): the signature line is missing from this extraction; this is
  // the body of a BitSetInfo membership test taking a byte `Offset`.
  // Offsets below the base of the bitset cannot be members.
  if (Offset < ByteOffset)
    return false;

  // Offsets not aligned to 2^AlignLog2 relative to the base cannot be members.
  if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
    return false;

  // Convert the byte offset into a bit index within the compressed bitset.
  uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
  if (BitOffset >= BitSize)
    return false;

  // Bits are stored in inverted order (see the builder, which inserts
  // BitSize - 1 - Offset), so invert again when querying.
  return Bits.count(BitSize - 1 - BitOffset);
}
145
  // NOTE(review): the signature line is missing from this extraction; this is
  // the body of a BitSetInfo printing routine writing to stream OS.
  OS << "offset " << ByteOffset << " size " << BitSize << " align "
     << (1 << AlignLog2);

  // All-ones bitsets are printed compactly, without listing each bit.
  if (isAllOnes()) {
    OS << " all-ones\n";
    return;
  }

  // Otherwise list every set bit index.
  OS << " { ";
  for (uint64_t B : Bits)
    OS << B << ' ';
  OS << "}\n";
}
160
  // NOTE(review): the signature line is missing from this extraction; this is
  // the body of the BitSetBuilder build routine returning a BitSetInfo.
  // An empty builder leaves Min > Max; normalize so the loops below are no-ops.
  if (Min > Max)
    Min = 0;

  // Normalize each offset against the minimum observed offset, and compute
  // the bitwise OR of each of the offsets. The number of trailing zeros
  // in the mask gives us the log2 of the alignment of all offsets, which
  // allows us to compress the bitset by only storing one bit per aligned
  // address.
  uint64_t Mask = 0;
  for (uint64_t &Offset : Offsets) {
    Offset -= Min;
    Mask |= Offset;
  }

  BitSetInfo BSI;
  BSI.ByteOffset = Min;

  BSI.AlignLog2 = 0;
  if (Mask != 0)
    BSI.AlignLog2 = llvm::countr_zero(Mask);

  // Build the compressed bitset while normalizing the offsets against the
  // computed alignment.
  BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
  for (uint64_t Offset : Offsets) {
    Offset >>= BSI.AlignLog2;
    // We invert the order of bits when adding them to the bitset. This is
    // because the offset that we test against is computed by subtracting the
    // address that we are testing from the global's address, which means that
    // the offset increases as the tested address decreases.
    BSI.Bits.insert(BSI.BitSize - 1 - Offset);
  }

  return BSI;
}
197
198void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
199 // Create a new fragment to hold the layout for F.
200 Fragments.emplace_back();
201 std::vector<uint64_t> &Fragment = Fragments.back();
202 uint64_t FragmentIndex = Fragments.size() - 1;
203
204 for (auto ObjIndex : F) {
205 uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
206 if (OldFragmentIndex == 0) {
207 // We haven't seen this object index before, so just add it to the current
208 // fragment.
209 Fragment.push_back(ObjIndex);
210 } else {
211 // This index belongs to an existing fragment. Copy the elements of the
212 // old fragment into this one and clear the old fragment. We don't update
213 // the fragment map just yet, this ensures that any further references to
214 // indices from the old fragment in this fragment do not insert any more
215 // indices.
216 std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
217 llvm::append_range(Fragment, OldFragment);
218 OldFragment.clear();
219 }
220 }
221
222 // Update the fragment map to point our object indices to this fragment.
223 for (uint64_t ObjIndex : Fragment)
224 FragmentMap[ObjIndex] = FragmentIndex;
225}
226
227void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
228 uint64_t BitSize, uint64_t &AllocByteOffset,
229 uint8_t &AllocMask) {
230 // Find the smallest current allocation.
231 unsigned Bit = 0;
232 for (unsigned I = 1; I != BitsPerByte; ++I)
233 if (BitAllocs[I] < BitAllocs[Bit])
234 Bit = I;
235
236 AllocByteOffset = BitAllocs[Bit];
237
238 // Add our size to it.
239 unsigned ReqSize = AllocByteOffset + BitSize;
240 BitAllocs[Bit] = ReqSize;
241 if (Bytes.size() < ReqSize)
242 Bytes.resize(ReqSize);
243
244 // Set our bits.
245 AllocMask = 1 << Bit;
246 for (uint64_t B : Bits)
247 Bytes[AllocByteOffset + B] |= AllocMask;
248}
249
  // NOTE(review): the signature line (a predicate taking Function *F) and the
  // opening of the module-flag extraction call are missing from this
  // extraction; the surviving code is reproduced unchanged.
  // Declarations never get a canonical local jump table entry.
  if (F->isDeclarationForLinker())
    return false;
      F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
  // If the "CFI Canonical Jump Tables" module flag is absent or nonzero, the
  // jump table is canonical; otherwise defer to the per-function attribute.
  if (!CI || !CI->isZero())
    return true;
  return F->hasFnAttribute("cfi-canonical-jump-table");
}
259
260namespace {
261
/// Book-keeping for one pending byte array allocation.
struct ByteArrayInfo {
  // Bit indices to set in the allocated byte array.
  std::set<uint64_t> Bits;
  // Number of bits covered by this bitset.
  uint64_t BitSize;
  // Placeholder global standing in for the byte array; RAUW'd and erased once
  // the real location is known (see allocateByteArrays()).
  GlobalVariable *ByteArray;
  // Placeholder global for the bit mask, likewise replaced during allocation.
  GlobalVariable *MaskGlobal;
  // If non-null, receives the final mask value when the array is allocated.
  uint8_t *MaskPtr = nullptr;
};
269
/// A POD-like structure that we use to store a global reference together with
/// its metadata types. In this pass we frequently need to query the set of
/// metadata types referenced by a global, which at the IR level is an expensive
/// operation involving a map lookup; this data structure helps to reduce the
/// number of times we need to do this lookup.
class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
  friend TrailingObjects;

  // The global object this member describes.
  GlobalObject *GO;
  // Number of type metadata nodes stored in the trailing storage.
  size_t NTypes;

  // For functions: true if the jump table is canonical. This essentially means
  // whether the canonical address (i.e. the symbol table entry) of the function
  // is provided by the local jump table. This is normally the same as whether
  // the function is defined locally, but if canonical jump tables are disabled
  // by the user then the jump table never provides a canonical definition.
  bool IsJumpTableCanonical;

  // For functions: true if this function is either defined or used in a thinlto
  // module and its jumptable entry needs to be exported to thinlto backends.
  bool IsExported;

public:
  /// Allocate a GlobalTypeMember for GO from Alloc, copying Types into the
  /// trailing storage. Instances are only ever created through this factory.
  static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
                                  bool IsJumpTableCanonical, bool IsExported,
                                  ArrayRef<MDNode *> Types) {
    auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
        totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
    GTM->GO = GO;
    GTM->NTypes = Types.size();
    GTM->IsJumpTableCanonical = IsJumpTableCanonical;
    GTM->IsExported = IsExported;
    llvm::copy(Types, GTM->getTrailingObjects());
    return GTM;
  }

  GlobalObject *getGlobal() const {
    return GO;
  }

  bool isJumpTableCanonical() const {
    return IsJumpTableCanonical;
  }

  bool isExported() const {
    return IsExported;
  }

  // The type metadata nodes attached to the global, stored inline after this
  // object.
  ArrayRef<MDNode *> types() const { return getTrailingObjects(NTypes); }
};
320
/// Describes one llvm.icall.branch.funnel call site together with its list of
/// jump-table targets (stored inline as trailing objects).
struct ICallBranchFunnel final
    : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
  // NOTE(review): the parameter line declaring the target list (referenced
  // below as `Targets`) is missing from this extraction; the surviving code is
  // reproduced unchanged.
  static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
                                   unsigned UniqueId) {
    auto *Call = static_cast<ICallBranchFunnel *>(
        Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
                       alignof(ICallBranchFunnel)));
    Call->CI = CI;
    Call->UniqueId = UniqueId;
    Call->NTargets = Targets.size();
    llvm::copy(Targets, Call->getTrailingObjects());
    return Call;
  }

  // The branch funnel call site this record was created for.
  CallInst *CI;
  ArrayRef<GlobalTypeMember *> targets() const {
    return getTrailingObjects(NTargets);
  }

  unsigned UniqueId;

private:
  // Number of targets stored in the trailing storage.
  size_t NTargets;
};
346
/// RAII helper: temporarily removes functions from llvm.used /
/// llvm.compiler.used and records alias/ifunc targets, so that a subsequent
/// RAUW does not redirect those references; the destructor restores them.
struct ScopedSaveAliaseesAndUsed {
  Module &M;
  // NOTE(review): the declaration of the saved used-list members (referenced
  // below as `Used` and `CompilerUsed`) is on a line missing from this
  // extraction.
  std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
  std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;

  // This function only removes functions from llvm.used and llvm.compiler.used.
  // We cannot remove global variables because they need to follow RAUW, as
  // they may be deleted by buildBitSetsFromGlobalVariables.
  void collectAndEraseUsedFunctions(Module &M,
                                    SmallVectorImpl<GlobalValue *> &Vec,
                                    bool CompilerUsed) {
    auto *GV = collectUsedGlobalVariables(M, Vec, CompilerUsed);
    if (!GV)
      return;
    // There's no API to only remove certain array elements from
    // llvm.used/llvm.compiler.used, so we remove all of them and add back only
    // the non-functions.
    GV->eraseFromParent();
    auto NonFuncBegin =
        std::stable_partition(Vec.begin(), Vec.end(), [](GlobalValue *GV) {
          return isa<Function>(GV);
        });
    if (CompilerUsed)
      appendToCompilerUsed(M, {NonFuncBegin, Vec.end()});
    else
      appendToUsed(M, {NonFuncBegin, Vec.end()});
    // Keep only the functions; they are re-appended by the destructor.
    Vec.resize(NonFuncBegin - Vec.begin());
  }

  ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
    // The users of this class want to replace all function references except
    // for aliases and llvm.used/llvm.compiler.used with references to a jump
    // table. We avoid replacing aliases in order to avoid introducing a double
    // indirection (or an alias pointing to a declaration in ThinLTO mode), and
    // we avoid replacing llvm.used/llvm.compiler.used because these global
    // variables describe properties of the global, not the jump table (besides,
    // offseted references to the jump table in llvm.used are invalid).
    // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
    // indirect) users", so what we do is save the list of globals referenced by
    // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
    // replace the aliasees and then set them back to their original values at
    // the end.
    collectAndEraseUsedFunctions(M, Used, false);
    collectAndEraseUsedFunctions(M, CompilerUsed, true);

    for (auto &GA : M.aliases()) {
      // FIXME: This should look past all aliases not just interposable ones,
      // see discussion on D65118.
      if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
        FunctionAliases.push_back({&GA, F});
    }

    for (auto &GI : M.ifuncs())
      if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
        ResolverIFuncs.push_back({&GI, F});
  }

  ~ScopedSaveAliaseesAndUsed() {
    // Restore the used lists and re-point aliases/ifuncs at their original
    // functions, undoing whatever RAUW redirected in between.
    appendToUsed(M, Used);
    appendToCompilerUsed(M, CompilerUsed);

    for (auto P : FunctionAliases)
      P.first->setAliasee(P.second);

    for (auto P : ResolverIFuncs) {
      // This does not preserve pointer casts that may have been stripped by the
      // constructor, but the resolver's type is different from that of the
      // ifunc anyway.
      P.first->setResolver(P.second);
    }
  }
};
420
/// Implements the lowering of type metadata and llvm.type.test calls for one
/// module, in regular LTO, ThinLTO-export, and ThinLTO-import configurations.
class LowerTypeTestsModule {
  // NOTE(review): this extraction has dropped several lines inside this class
  // (at least one member between Arch and ObjectFormat, the kind field of
  // TypeIdLowering, and parameter/continuation lines of several method
  // declarations). The surviving text is reproduced unchanged.
  Module &M;

  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;

  Triple::ArchType Arch;
  Triple::ObjectFormatType ObjectFormat;

  // Determines which kind of Thumb jump table we generate. If arch is
  // either 'arm' or 'thumb' we need to find this out, because
  // selectJumpTableArmEncoding may decide to use Thumb in either case.
  bool CanUseArmJumpTable = false, CanUseThumbBWJumpTable = false;

  // Cache variable used by hasBranchTargetEnforcement().
  int HasBranchTargetEnforcement = -1;

  // Frequently used IR types, cached once per module.
  IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
  IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
  IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
  IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);

  // Indirect function call index assignment counter for WebAssembly
  uint64_t IndirectIndex = 1;

  // Mapping from type identifiers to the call sites that test them, as well as
  // whether the type identifier needs to be exported to ThinLTO backends as
  // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
  struct TypeIdUserInfo {
    std::vector<CallInst *> CallSites;
    bool IsExported = false;
  };
  DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;

  /// This structure describes how to lower type tests for a particular type
  /// identifier. It is either built directly from the global analysis (during
  /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
  /// identifier summaries and external symbol references (in ThinLTO backends).
  struct TypeIdLowering {
    // NOTE(review): the field recording the resolution kind (referenced
    // elsewhere in this file as TheKind) is on a line missing from this
    // extraction.

    /// All except Unsat: the address of the last element within the combined
    /// global.
    Constant *OffsetedGlobal;

    /// ByteArray, Inline, AllOnes: log2 of the required global alignment
    /// relative to the start address.
    Constant *AlignLog2;

    /// ByteArray, Inline, AllOnes: one less than the size of the memory region
    /// covering members of this type identifier as a multiple of 2^AlignLog2.
    Constant *SizeM1;

    /// ByteArray: the byte array to test the address against.
    Constant *TheByteArray;

    /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
    Constant *BitMask;

    /// Inline: the bit mask to test the address against.
    Constant *InlineBits;
  };

  // Pending byte arrays, materialized by allocateByteArrays().
  std::vector<ByteArrayInfo> ByteArrayInfos;

  Function *WeakInitializerFn = nullptr;

  GlobalVariable *GlobalAnnotation;
  DenseSet<Value *> FunctionAnnotations;

  bool shouldExportConstantsAsAbsoluteSymbols();
  uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
  TypeIdLowering importTypeId(StringRef TypeId);
  void importTypeTest(CallInst *CI);
  void importFunction(Function *F, bool isJumpTableCanonical);

  ByteArrayInfo *createByteArray(const BitSetInfo &BSI);
  void allocateByteArrays();
  Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
                          Value *BitOffset);
  void lowerTypeTestCalls(
      ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
      const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                           const TypeIdLowering &TIL);

  // NOTE(review): continuation lines of the following declarations are
  // missing from this extraction; they are reproduced as-is.
  void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
  selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions);
  bool hasBranchTargetEnforcement();
  unsigned getJumpTableEntrySize(Triple::ArchType JumpTableArch);
  InlineAsm *createJumpTableEntryAsm(Triple::ArchType JumpTableArch);
  void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
  void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
  void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
  void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
  void
  buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
                              ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);

  void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
                                              bool IsJumpTableCanonical);
  void moveInitializerToModuleConstructor(GlobalVariable *GV);
  void findGlobalVariableUsersOf(Constant *C,
                                 SmallSetVector<GlobalVariable *, 8> &Out);

  void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions,
                       Triple::ArchType JumpTableArch);

  /// replaceCfiUses - Go through the uses list for this definition
  /// and make each use point to "V" instead of "this" when the use is outside
  /// the block. 'This's use list is expected to have at least one element.
  /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
  /// uses.
  void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);

  /// replaceDirectCalls - Go through the uses list for this definition and
  /// replace each use, which is a direct function call.
  void replaceDirectCalls(Value *Old, Value *New);

  bool isFunctionAnnotation(Value *V) const {
    return FunctionAnnotations.contains(V);
  }

  void maybeReplaceComdat(Function *F, StringRef OriginalName);

public:
  LowerTypeTestsModule(Module &M, ModuleAnalysisManager &AM,
                       ModuleSummaryIndex *ExportSummary,
                       const ModuleSummaryIndex *ImportSummary);

  bool lower();

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool runForTesting(Module &M, ModuleAnalysisManager &AM);
};
567} // end anonymous namespace
568
/// Build a bit set for list of offsets.
// NOTE(review): the signature line is missing from this extraction; this is
// the tail of a helper that takes the offset list `Offsets`.
  // Compute the byte offset of each address associated with this type
  // identifier.
  return BitSetBuilder(Offsets).build();
}
575
/// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
/// Bits. This pattern matches to the bt instruction on x86.
// NOTE(review): the opening signature line (declaring the IRBuilder `B` and
// the `Bits` value) is missing from this extraction.
                                  Value *BitOffset) {
  auto BitsType = cast<IntegerType>(Bits->getType());
  unsigned BitWidth = BitsType->getBitWidth();

  // Reduce the offset modulo the bit width, then build a one-hot mask and AND
  // it against Bits; the result is nonzero iff the selected bit is set.
  BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
  Value *BitIndex =
      B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
  Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
  Value *MaskedBits = B.CreateAnd(Bits, BitMask);
  return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
}
590
/// Record a pending byte array for BSI, returning its bookkeeping entry.
ByteArrayInfo *LowerTypeTestsModule::createByteArray(const BitSetInfo &BSI) {
  // Create globals to stand in for byte arrays and masks. These never actually
  // get initialized, we RAUW and erase them later in allocateByteArrays() once
  // we know the offset and mask to use.
  auto ByteArrayGlobal = new GlobalVariable(
      M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
  // NOTE(review): the continuation line of this constructor call (its
  // remaining arguments) is missing from this extraction.
  auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,

  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;
  BAI->BitSize = BSI.BitSize;
  BAI->ByteArray = ByteArrayGlobal;
  BAI->MaskGlobal = MaskGlobal;
  return BAI;
}
609
/// Lay out all pending byte arrays into one shared global and replace the
/// placeholder globals recorded by createByteArray().
void LowerTypeTestsModule::allocateByteArrays() {
  // Allocate the largest bitsets first so they pack tightly at low offsets.
  llvm::stable_sort(ByteArrayInfos,
                    [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                      return BAI1.BitSize > BAI2.BitSize;
                    });

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  // NOTE(review): the declaration of the byte-array builder `BAB` is on a
  // line missing from this extraction.
  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    uint8_t Mask;
    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    // Replace the placeholder mask global with the concrete mask constant.
    BAI->MaskGlobal->replaceAllUsesWith(
        ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), PtrTy));
    BAI->MaskGlobal->eraseFromParent();
    if (BAI->MaskPtr)
      *BAI->MaskPtr = Mask;
  }

  Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
  auto ByteArray =
      new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, ByteArrayConst);

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];
    // NOTE(review): the line declaring the GEP constant used below is missing
    // from this extraction; only its trailing arguments survive.
        ByteArray, ConstantInt::get(IntPtrTy, ByteArrayOffsets[I]));

    // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
    // that the pc-relative displacement is folded into the lea instead of the
    // test instruction getting another displacement.
    GlobalAlias *Alias = GlobalAlias::create(
        Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
    BAI->ByteArray->replaceAllUsesWith(Alias);
    BAI->ByteArray->eraseFromParent();
  }

  // Update the statistics with the final allocation totals.
  ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
                      BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
                      BAB.BitAllocs[6] + BAB.BitAllocs[7];
  ByteArraySizeBytes = BAB.Bytes.size();
}
656
/// Build a test that bit BitOffset is set in the type identifier that was
/// lowered to TIL, which must be either an Inline or a ByteArray.
Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
                                              const TypeIdLowering &TIL,
                                              Value *BitOffset) {
  if (TIL.TheKind == TypeTestResolution::Inline) {
    // If the bit set is sufficiently small, we can avoid a load by bit testing
    // a constant.
    return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
  } else {
    Constant *ByteArray = TIL.TheByteArray;
    if (AvoidReuse && !ImportSummary) {
      // Each use of the byte array uses a different alias. This makes the
      // backend less likely to reuse previously computed byte array addresses,
      // improving the security of the CFI mechanism based on this pass.
      // This won't work when importing because TheByteArray is external.
      // NOTE(review): the line creating the per-use alias is missing from
      // this extraction; only its trailing arguments survive.
          "bits_use", ByteArray, &M);
    }

    // Load the byte at BitOffset and test the mask bit for this type id.
    Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
    Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);

    Value *ByteAndMask =
        B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
    return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
  }
}
685
/// Return true if V is statically known to be a member of TypeId at constant
/// offset COffset, by walking globals, constant GEPs, bitcasts and selects.
static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
                                Value *V, uint64_t COffset) {
  if (auto GV = dyn_cast<GlobalObject>(V)) {
    // NOTE(review): the declaration of the `Types` metadata vector is on a
    // line missing from this extraction.
    GV->getMetadata(LLVMContext::MD_type, Types);
    for (MDNode *Type : Types) {
      if (Type->getOperand(1) != TypeId)
        continue;
      // NOTE(review): the start of the cast chain extracting `Offset` from
      // operand 0 of the type metadata is missing from this extraction.
          cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
          ->getZExtValue();
      if (COffset == Offset)
        return true;
    }
    return false;
  }

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    // Fold a constant GEP offset into the accumulated offset and recurse on
    // the base pointer; non-constant GEPs cannot be proven members.
    APInt APOffset(DL.getIndexSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);
    if (!Result)
      return false;
    COffset += APOffset.getZExtValue();
    return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
  }

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);

    // A select is a known member only if both arms are members at this offset.
    if (Op->getOpcode() == Instruction::Select)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
             isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
  }

  return false;
}
724
/// Lower a llvm.type.test call to its implementation. Returns the value to
/// replace the call with.
Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                                               const TypeIdLowering &TIL) {
  // Delay lowering if the resolution is currently unknown.
  if (TIL.TheKind == TypeTestResolution::Unknown)
    return nullptr;
  // Unsat: no global is a member of this type id, so the test is always false.
  if (TIL.TheKind == TypeTestResolution::Unsat)
    return ConstantInt::getFalse(M.getContext());

  Value *Ptr = CI->getArgOperand(0);
  const DataLayout &DL = M.getDataLayout();
  // If the tested pointer is statically known to be a member, fold to true.
  if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
    return ConstantInt::getTrue(M.getContext());

  BasicBlock *InitialBB = CI->getParent();

  IRBuilder<> B(CI);

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

  Constant *OffsetedGlobalAsInt =
      ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
  // Single: exactly one member, so a pointer equality check suffices.
  if (TIL.TheKind == TypeTestResolution::Single)
    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  // Here we compute `last element - address`. The reason why we do this instead
  // of computing `address - first element` is that it leads to a slightly
  // shorter instruction sequence on x86. Because it doesn't matter how we do
  // the subtraction on other architectures, we do so unconditionally.
  Value *PtrOffset = B.CreateSub(OffsetedGlobalAsInt, PtrAsInt);

  // We need to check that the offset both falls within our range and is
  // suitably aligned. We can check both properties at the same time by
  // performing a right rotate by log2(alignment) followed by an integer
  // comparison against the bitset size. The rotate will move the lower
  // order bits that need to be zero into the higher order bits of the
  // result, causing the comparison to fail if they are nonzero. The rotate
  // also conveniently gives us a bit offset to use during the load from
  // the bitset.
  Value *BitOffset = B.CreateIntrinsic(IntPtrTy, Intrinsic::fshr,
                                       {PtrOffset, PtrOffset, TIL.AlignLog2});

  Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);

  // If the bit set is all ones, testing against it is unnecessary.
  if (TIL.TheKind == TypeTestResolution::AllOnes)
    return OffsetInRange;

  // See if the intrinsic is used in the following common pattern:
  //   br(llvm.type.test(...), thenbb, elsebb)
  // where nothing happens between the type test and the br.
  // If so, create slightly simpler IR.
  if (CI->hasOneUse())
    if (auto *Br = dyn_cast<CondBrInst>(*CI->user_begin()))
      if (CI->getNextNode() == Br) {
        BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
        BasicBlock *Else = Br->getSuccessor(1);
        // Branch directly on the range check, preserving any branch weights.
        CondBrInst *NewBr = CondBrInst::Create(OffsetInRange, Then, Else);
        NewBr->setMetadata(LLVMContext::MD_prof,
                           Br->getMetadata(LLVMContext::MD_prof));
        ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);

        // Update phis in Else resulting from InitialBB being split
        for (auto &Phi : Else->phis())
          Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);

        IRBuilder<> ThenB(CI);
        return createBitSetTest(ThenB, TIL, BitOffset);
      }

  MDBuilder MDB(M.getContext());
  IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false,
                                              MDB.createLikelyBranchWeights()));

  // Now that we know that the offset is in range and aligned, load the
  // appropriate bit from the bitset.
  Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);

  // The value we want is 0 if we came directly from the initial block
  // (having failed the range or alignment checks), or the loaded bit if
  // we came from the block in which we loaded it.
  B.SetInsertPoint(CI);
  PHINode *P = B.CreatePHI(Int1Ty, 2);
  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
  return P;
}
813
/// Given a disjoint set of type identifiers and globals, lay out the globals,
/// build the bit sets and lower the llvm.type.test calls.
void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
// NOTE(review): the signature's parameter lines (the TypeIds and Globals
// lists referenced below) are missing from this extraction.
  // Build a new global with the combined contents of the referenced globals.
  // This global is a struct whose even-indexed elements contain the original
  // contents of the referenced globals and whose odd-indexed elements contain
  // any padding required to align the next element to the next power of 2 plus
  // any additional padding required to meet its alignment requirements.
  std::vector<Constant *> GlobalInits;
  const DataLayout &DL = M.getDataLayout();
  DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
  Align MaxAlign;
  uint64_t CurOffset = 0;
  uint64_t DesiredPadding = 0;
  for (GlobalTypeMember *G : Globals) {
    auto *GV = cast<GlobalVariable>(G->getGlobal());
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    MaxAlign = std::max(MaxAlign, Alignment);
    uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
    GlobalLayout[G] = GVOffset;
    if (GVOffset != 0) {
      uint64_t Padding = GVOffset - CurOffset;
      // NOTE(review): the continuation line building the padding constant
      // pushed here is missing from this extraction.
      GlobalInits.push_back(
    }

    GlobalInits.push_back(GV->getInitializer());
    uint64_t InitSize = GV->getGlobalSize(DL);
    CurOffset = GVOffset + InitSize;

    // Compute the amount of padding that we'd like for the next element.
    DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;

    // Experiments of different caps with Chromium on both x64 and ARM64
    // have shown that the 32-byte cap generates the smallest binary on
    // both platforms while different caps yield similar performance.
    // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
    if (DesiredPadding > 32)
      DesiredPadding = alignTo(InitSize, 32) - InitSize;
  }

  Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
  // NOTE(review): the continuation line with the remaining GlobalVariable
  // constructor arguments is missing from this extraction.
  auto *CombinedGlobal =
      new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
  CombinedGlobal->setAlignment(MaxAlign);

  StructType *NewTy = cast<StructType>(NewInit->getType());
  lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);

  // Build aliases pointing to offsets into the combined global for each
  // global from which we built the combined global, and replace references
  // to the original globals with references to the aliases.
  for (unsigned I = 0; I != Globals.size(); ++I) {
    GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());

    // Multiply by 2 to account for padding elements.
    Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
                                      ConstantInt::get(Int32Ty, I * 2)};
    Constant *CombinedGlobalElemPtr = ConstantExpr::getInBoundsGetElementPtr(
        NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
    assert(GV->getType()->getAddressSpace() == 0);
    GlobalAlias *GAlias =
        GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
                            "", CombinedGlobalElemPtr, &M);
    GAlias->setVisibility(GV->getVisibility());
    GAlias->takeName(GV);
    GV->replaceAllUsesWith(GAlias);
    GV->eraseFromParent();
  }
}
887
888bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
889 return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
890 ObjectFormat == Triple::ELF;
891}
892
893/// Export the given type identifier so that ThinLTO backends may import it.
894/// Type identifiers are exported by adding coarse-grained information about how
895/// to test the type identifier to the summary, and creating symbols in the
896/// object file (aliases and absolute symbols) containing fine-grained
897/// information about the type identifier.
898///
899/// Returns a pointer to the location in which to store the bitmask, if
900/// applicable.
// Writes coarse-grained data into the combined summary slot (TTRes) and, on
// targets where shouldExportConstantsAsAbsoluteSymbols() holds, emits
// "__typeid_<id>_<field>" symbols carrying the fine-grained lowering data
// that importTypeId() reads back in the ThinLTO backends.
901 uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
902 const TypeIdLowering &TIL) {
903 TypeTestResolution &TTRes =
904 ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
905 TTRes.TheKind = TIL.TheKind;
906
// Emit a symbol "__typeid_<TypeId>_<Name>" aliasing the constant C.
907 auto ExportGlobal = [&](StringRef Name, Constant *C) {
908 GlobalAlias *GA =
// NOTE(review): original source lines 909 and 911 (the GlobalAlias::create(...)
// call head and, presumably, the alias visibility setup) were lost in
// extraction -- restore from upstream LowerTypeTests.cpp before building.
910 "__typeid_" + TypeId + "_" + Name, C, &M);
912 };
913
// Constants go out either as absolute symbols (x86 ELF) or directly into the
// given summary field 'Storage'.
914 auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
915 if (shouldExportConstantsAsAbsoluteSymbols())
916 ExportGlobal(Name, ConstantExpr::getIntToPtr(C, PtrTy));
917 else
918 Storage = cast<ConstantInt>(C)->getZExtValue();
919 };
920
921 if (TIL.TheKind != TypeTestResolution::Unsat)
922 ExportGlobal("global_addr", TIL.OffsetedGlobal);
923
924 if (TIL.TheKind == TypeTestResolution::ByteArray ||
925 TIL.TheKind == TypeTestResolution::Inline ||
926 TIL.TheKind == TypeTestResolution::AllOnes) {
927 ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
928 ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);
929
// Record an upper bound on size_m1's width so the importer can attach a
// matching !absolute_symbol range (see importTypeId's SetAbsRange).
930 uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
931 if (TIL.TheKind == TypeTestResolution::Inline)
932 TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
933 else
934 TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
935 }
936
937 if (TIL.TheKind == TypeTestResolution::ByteArray) {
938 ExportGlobal("byte_array", TIL.TheByteArray);
939 if (shouldExportConstantsAsAbsoluteSymbols())
940 ExportGlobal("bit_mask", TIL.BitMask);
941 else
// The mask value isn't known yet; the caller fills this slot in later.
942 return &TTRes.BitMask;
943 }
944
945 if (TIL.TheKind == TypeTestResolution::Inline)
946 ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);
947
948 return nullptr;
949 }
950
// ThinLTO backend: reconstruct a TypeIdLowering from the summary data and the
// "__typeid_<id>_<field>" symbols that exportTypeId() produced in the full-LTO
// module. Missing summary => no globals match => Unsat lowering.
951 LowerTypeTestsModule::TypeIdLowering
952 LowerTypeTestsModule::importTypeId(StringRef TypeId) {
953 const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
954 if (!TidSummary)
955 return {}; // Unsat: no globals match this type id.
956 const TypeTestResolution &TTRes = TidSummary->TTRes;
957
958 TypeIdLowering TIL;
959 TIL.TheKind = TTRes.TheKind;
960
// Declare the external "__typeid_<TypeId>_<Name>" symbol in this module.
961 auto ImportGlobal = [&](StringRef Name) {
962 // Give the global a type of length 0 so that it is not assumed not to alias
963 // with any other global.
964 GlobalVariable *GV = M.getOrInsertGlobal(
965 ("__typeid_" + TypeId + "_" + Name).str(), Int8Arr0Ty);
// NOTE(review): original source line 966 (presumably the visibility setup on
// GV) was lost in extraction -- restore from upstream LowerTypeTests.cpp.
967 return GV;
968 };
969
// Import a constant either from its summary value or, on absolute-symbol
// targets, as a reference to the exported symbol annotated with an
// !absolute_symbol range of width AbsWidth.
970 auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
971 Type *Ty) {
972 if (!shouldExportConstantsAsAbsoluteSymbols()) {
973 Constant *C =
974 ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
975 if (!isa<IntegerType>(Ty))
// NOTE(review): original source line 976 (the statement guarded by this 'if',
// presumably converting C to pointer type) is missing from this extract.
977 return C;
978 }
979
980 Constant *C = ImportGlobal(Name);
981 auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
982 if (isa<IntegerType>(Ty))
// NOTE(review): original source line 983 (the statement guarded by this 'if',
// presumably a ptrtoint cast of C) is missing from this extract.
984 if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
985 return C;
986
// Attach an !absolute_symbol [Min, Max) range so the backend can fold the
// symbol's value into immediate operands.
987 auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
988 auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
989 auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
990 GV->setMetadata(LLVMContext::MD_absolute_symbol,
991 MDNode::get(M.getContext(), {MinC, MaxC}));
992 };
993 if (AbsWidth == IntPtrTy->getBitWidth()) {
994 uint64_t AllOnes = IntPtrTy->getBitMask();
995 SetAbsRange(AllOnes, AllOnes); // Full set.
996 } else {
997 SetAbsRange(0, 1ull << AbsWidth);
998 }
999 return C;
1000 };
1001
1002 if (TIL.TheKind != TypeTestResolution::Unsat) {
1003 auto *GV = ImportGlobal("global_addr");
1004 // This is either a vtable (in .data.rel.ro) or a jump table (in .text).
1005 // Either way it's expected to be in the low 2 GiB, so set the small code
1006 // model.
1007 //
1008 // For .data.rel.ro, we currently place all such sections in the low 2 GiB
1009 // [1], and for .text the sections are expected to be in the low 2 GiB under
1010 // the small and medium code models [2] and this pass only supports those
1011 // code models (e.g. jump tables use jmp instead of movabs/jmp).
1012 //
1013 // [1]https://github.com/llvm/llvm-project/pull/137742
1014 // [2]https://maskray.me/blog/2023-05-14-relocation-overflow-and-code-models
// NOTE(review): original source line 1015 (the statement applying the small
// code model to GV, per the comment above) is missing from this extract.
1016 TIL.OffsetedGlobal = GV;
1017 }
1018
1019 if (TIL.TheKind == TypeTestResolution::ByteArray ||
1020 TIL.TheKind == TypeTestResolution::Inline ||
1021 TIL.TheKind == TypeTestResolution::AllOnes) {
1022 TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, IntPtrTy);
1023 TIL.SizeM1 =
1024 ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
1025 }
1026
1027 if (TIL.TheKind == TypeTestResolution::ByteArray) {
1028 TIL.TheByteArray = ImportGlobal("byte_array");
1029 TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, PtrTy);
1030 }
1031
1032 if (TIL.TheKind == TypeTestResolution::Inline)
1033 TIL.InlineBits = ImportConstant(
1034 "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
1035 TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);
1036
1037 return TIL;
1038 }
1039
1040void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
1041 auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
1042 if (!TypeIdMDVal)
1043 report_fatal_error("Second argument of llvm.type.test must be metadata");
1044
1045 auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
1046 // If this is a local unpromoted type, which doesn't have a metadata string,
1047 // treat as Unknown and delay lowering, so that we can still utilize it for
1048 // later optimizations.
1049 if (!TypeIdStr)
1050 return;
1051
1052 TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
1053 Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
1054 if (Lowered) {
1055 CI->replaceAllUsesWith(Lowered);
1056 CI->eraseFromParent();
1057 }
1058}
1059
1060void LowerTypeTestsModule::maybeReplaceComdat(Function *F,
1061 StringRef OriginalName) {
1062 // For COFF we should also rename the comdat if this function also
1063 // happens to be the key function. Even if the comdat name changes, this
1064 // should still be fine since comdat and symbol resolution happens
1065 // before LTO, so all symbols which would prevail have been selected.
1066 if (F->hasComdat() && ObjectFormat == Triple::COFF &&
1067 F->getComdat()->getName() == OriginalName) {
1068 Comdat *OldComdat = F->getComdat();
1069 Comdat *NewComdat = M.getOrInsertComdat(F->getName());
1070 for (GlobalObject &GO : M.global_objects()) {
1071 if (GO.getComdat() == OldComdat)
1072 GO.setComdat(NewComdat);
1073 }
1074 }
1075}
1076
1077 // ThinLTO backend: the function F has a jump table entry; update this module
1078 // accordingly. isJumpTableCanonical describes the type of the jump table entry.
1079 void LowerTypeTestsModule::importFunction(Function *F,
1080 bool isJumpTableCanonical) {
1081 assert(F->getType()->getAddressSpace() == 0);
1082
// Remember the original visibility/name: F itself may be renamed to ".cfi"
// below and a fresh declaration created under the original name.
1083 GlobalValue::VisibilityTypes Visibility = F->getVisibility();
1084 std::string Name = std::string(F->getName());
1085
1086 if (F->isDeclarationForLinker() && isJumpTableCanonical) {
// Non-dso_local functions may be overridden at run time,
// don't short-circuit them.
1089 if (F->isDSOLocal()) {
1090 Function *RealF = Function::Create(F->getFunctionType(),
// NOTE(review): original source lines 1091 (presumably the linkage argument of
// this Function::Create call) and 1094 were lost in extraction -- restore from
// upstream LowerTypeTests.cpp.
1092 F->getAddressSpace(),
1093 Name + ".cfi", &M);
1095 replaceDirectCalls(F, RealF);
1096 }
1097 return;
1098 }
1099
1100 Function *FDecl;
1101 if (!isJumpTableCanonical) {
1102 // Either a declaration of an external function or a reference to a locally
1103 // defined jump table.
1104 FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1105 F->getAddressSpace(), Name + ".cfi_jt", &M);
// NOTE(review): original source line 1106 (presumably the visibility setup on
// FDecl) is missing from this extract.
1107 } else {
// Canonical case: rename the definition to "<Name>.cfi" and declare the jump
// table entry under the original name.
1108 F->setName(Name + ".cfi");
1109 maybeReplaceComdat(F, Name);
1110 FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1111 F->getAddressSpace(), Name, &M);
1112 FDecl->setVisibility(Visibility);
1113 Visibility = GlobalValue::HiddenVisibility;
1114
1115 // Update aliases pointing to this function to also include the ".cfi" suffix,
1116 // We expect the jump table entry to either point to the real function or an
1117 // alias. Redirect all other users to the jump table entry.
1118 for (auto &U : F->uses()) {
1119 if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
1120 std::string AliasName = A->getName().str() + ".cfi";
1121 Function *AliasDecl = Function::Create(
1122 F->getFunctionType(), GlobalValue::ExternalLinkage,
1123 F->getAddressSpace(), "", &M);
1124 AliasDecl->takeName(A);
1125 A->replaceAllUsesWith(AliasDecl);
1126 A->setName(AliasName);
1127 }
1128 }
1129 }
1130
1131 if (F->hasExternalWeakLinkage())
1132 replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
1133 else
1134 replaceCfiUses(F, FDecl, isJumpTableCanonical);
1135
1136 // Set visibility late because it's used in replaceCfiUses() to determine
1137 // whether uses need to be replaced.
1138 F->setVisibility(Visibility);
1139 }
1140
// Collect, for each interesting type identifier, the byte offsets (relative to
// the combined global) at which a member of that type id lives, then build one
// BitSetInfo per type id from those offsets.
1141 static auto
// NOTE(review): original source lines 1142 (remainder of this function's
// signature, taking ArrayRef<Metadata *> TypeIds) and 1144 (the declaration of
// the OffsetsByTypeID map used below) were lost in extraction.
1143 const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
1145 // Pre-populate the map with interesting type identifiers.
1146 for (Metadata *TypeId : TypeIds)
1147 OffsetsByTypeID[TypeId];
1148 for (const auto &[Mem, MemOff] : GlobalLayout) {
1149 for (MDNode *Type : Mem->types()) {
// Operand 1 of a type metadata node is the type identifier; skip type ids
// that are not in this disjoint set.
1150 auto It = OffsetsByTypeID.find(Type->getOperand(1));
1151 if (It == OffsetsByTypeID.end())
1152 continue;
// NOTE(review): original source lines 1153-1154 (the head of the expression
// extracting the integer offset from operand 0 of the type metadata) are
// missing from this extract.
1155 cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
1156 ->getZExtValue();
1157 It->second.push_back(MemOff + Offset);
1158 }
1159 }
1160
// NOTE(review): original source line 1161 (the declaration of the BitSets
// result vector) is missing from this extract.
1162 BitSets.reserve(TypeIds.size());
1163 for (Metadata *TypeId : TypeIds) {
1164 BitSets.emplace_back(TypeId, buildBitSet(OffsetsByTypeID[TypeId]));
1165 LLVM_DEBUG({
1166 if (auto MDS = dyn_cast<MDString>(TypeId))
1167 dbgs() << MDS->getString() << ": ";
1168 else
1169 dbgs() << "<unnamed>: ";
1170 BitSets.back().second.print(dbgs());
1171 });
1172 }
1173
1174 return BitSets;
1175 }
1176
1177void LowerTypeTestsModule::lowerTypeTestCalls(
1178 ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
1179 const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
1180 // For each type identifier in this disjoint set...
1181 for (const auto &[TypeId, BSI] : buildBitSets(TypeIds, GlobalLayout)) {
1182 ByteArrayInfo *BAI = nullptr;
1183 TypeIdLowering TIL;
1184
1185 uint64_t GlobalOffset =
1186 BSI.ByteOffset + ((BSI.BitSize - 1) << BSI.AlignLog2);
1187 TIL.OffsetedGlobal = ConstantExpr::getPtrAdd(
1188 CombinedGlobalAddr, ConstantInt::get(IntPtrTy, GlobalOffset)),
1189 TIL.AlignLog2 = ConstantInt::get(IntPtrTy, BSI.AlignLog2);
1190 TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
1191 if (BSI.isAllOnes()) {
1192 TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
1193 : TypeTestResolution::AllOnes;
1194 } else if (BSI.BitSize <= IntPtrTy->getBitWidth()) {
1195 TIL.TheKind = TypeTestResolution::Inline;
1196 uint64_t InlineBits = 0;
1197 for (auto Bit : BSI.Bits)
1198 InlineBits |= uint64_t(1) << Bit;
1199 if (InlineBits == 0)
1200 TIL.TheKind = TypeTestResolution::Unsat;
1201 else
1202 TIL.InlineBits = ConstantInt::get(
1203 (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
1204 } else {
1205 TIL.TheKind = TypeTestResolution::ByteArray;
1206 ++NumByteArraysCreated;
1207 BAI = createByteArray(BSI);
1208 TIL.TheByteArray = BAI->ByteArray;
1209 TIL.BitMask = BAI->MaskGlobal;
1210 }
1211
1212 TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];
1213
1214 if (TIUI.IsExported) {
1215 uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
1216 if (BAI)
1217 BAI->MaskPtr = MaskPtr;
1218 }
1219
1220 // Lower each call to llvm.type.test for this type identifier.
1221 for (CallInst *CI : TIUI.CallSites) {
1222 ++NumTypeTestCallsLowered;
1223 Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
1224 if (Lowered) {
1225 CI->replaceAllUsesWith(Lowered);
1226 CI->eraseFromParent();
1227 }
1228 }
1229 }
1230}
1231
// Validate one !type metadata attachment on GO; each violation is a hard
// error because the lowering math below depends on these invariants.
1232 void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
1233 if (Type->getNumOperands() != 2)
1234 report_fatal_error("All operands of type metadata must have 2 elements");
1235
1236 if (GO->isThreadLocal())
1237 report_fatal_error("Bit set element may not be thread-local");
1238 if (isa<GlobalVariable>(GO) && GO->hasSection())
// NOTE(review): original source line 1239 (the report_fatal_error( call head
// for the message below) is missing from this extract.
1240 "A member of a type identifier may not have an explicit section");
1241
1242 // FIXME: We previously checked that global var member of a type identifier
1243 // must be a definition, but the IR linker may leave type metadata on
1244 // declarations. We should restore this check after fixing PR31759.
1245
// Operand 0 must be an integer constant: the member's offset within GO.
1246 auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
1247 if (!OffsetConstMD)
1248 report_fatal_error("Type offset must be a constant");
1249 auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
1250 if (!OffsetInt)
1251 report_fatal_error("Type offset must be an integer constant");
1252 }
1253
// Per-architecture jump table entry sizes in bytes. Entries must be
// power-of-two sized so member addresses stay evenly spaced (see the
// bit-set arithmetic in the lowering code).
1254 static const unsigned kX86JumpTableEntrySize = 8;
1255 static const unsigned kX86IBTJumpTableEntrySize = 16;
1256 static const unsigned kARMJumpTableEntrySize = 4;
1257 static const unsigned kARMBTIJumpTableEntrySize = 8;
1258 static const unsigned kARMv6MJumpTableEntrySize = 16;
1259 static const unsigned kRISCVJumpTableEntrySize = 8;
1260 static const unsigned kLOONGARCH64JumpTableEntrySize = 8;
1261 static const unsigned kHexagonJumpTableEntrySize = 4;
1262
1263bool LowerTypeTestsModule::hasBranchTargetEnforcement() {
1264 if (HasBranchTargetEnforcement == -1) {
1265 // First time this query has been called. Find out the answer by checking
1266 // the module flags.
1267 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
1268 M.getModuleFlag("branch-target-enforcement")))
1269 HasBranchTargetEnforcement = !BTE->isZero();
1270 else
1271 HasBranchTargetEnforcement = 0;
1272 }
1273 return HasBranchTargetEnforcement;
1274}
1275
// Size in bytes of one jump table entry for the given architecture, using the
// k*JumpTableEntrySize constants above; aborts for unsupported targets.
1276 unsigned
1277 LowerTypeTestsModule::getJumpTableEntrySize(Triple::ArchType JumpTableArch) {
// NOTE(review): all of the per-architecture "return k...JumpTableEntrySize;"
// statements (original source lines 1284-1285, 1287, 1291-1292, 1294,
// 1298-1299, 1302-1304 and 1306) were lost in extraction -- as shown, every
// case falls through. Restore from upstream LowerTypeTests.cpp.
1278 switch (JumpTableArch) {
1279 case Triple::x86:
1280 case Triple::x86_64:
// With cf-protection-branch the entry also carries an ENDBR, so it is larger.
1281 if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
1282 M.getModuleFlag("cf-protection-branch")))
1283 if (MD->getZExtValue())
1286 case Triple::arm:
1288 case Triple::thumb:
1289 if (CanUseThumbBWJumpTable) {
1290 if (hasBranchTargetEnforcement())
1293 } else {
1295 }
1296 case Triple::aarch64:
1297 if (hasBranchTargetEnforcement())
1300 case Triple::riscv32:
1301 case Triple::riscv64:
1305 case Triple::hexagon:
1307 default:
1308 report_fatal_error("Unsupported architecture for jump tables");
1309 }
1310 }
1311
1312 // Create an inline asm constant representing a jump table entry for the target.
1313 // This consists of an instruction sequence containing a relative branch to
1314 // Dest.
// "$0" below is the branch-target operand; the "s" constraint passed to
// InlineAsm::get at the bottom makes it a symbolic (constant) operand.
1315 InlineAsm *
1316 LowerTypeTestsModule::createJumpTableEntryAsm(Triple::ArchType JumpTableArch) {
1317 std::string Asm;
1318 raw_string_ostream AsmOS(Asm);
1319
1320 if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
// Emit an ENDBR landing pad when the module was built with
// -fcf-protection=branch, then pad the entry with int3 / 0xcc bytes.
1321 bool Endbr = false;
1322 if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
1323 M.getModuleFlag("cf-protection-branch")))
1324 Endbr = !MD->isZero();
1325 if (Endbr)
1326 AsmOS << (JumpTableArch == Triple::x86 ? "endbr32\n" : "endbr64\n");
1327 AsmOS << "jmp ${0:c}@plt\n";
1328 if (Endbr)
1329 AsmOS << ".balign 16, 0xcc\n";
1330 else
1331 AsmOS << "int3\nint3\nint3\n";
1332 } else if (JumpTableArch == Triple::arm) {
1333 AsmOS << "b $0\n";
1334 } else if (JumpTableArch == Triple::aarch64) {
1335 if (hasBranchTargetEnforcement())
1336 AsmOS << "bti c\n";
1337 AsmOS << "b $0\n";
1338 } else if (JumpTableArch == Triple::thumb) {
1339 if (!CanUseThumbBWJumpTable) {
1340 // In Armv6-M, this sequence will generate a branch without corrupting
1341 // any registers. We use two stack words; in the second, we construct the
1342 // address we'll pop into pc, and the first is used to save and restore
1343 // r0 which we use as a temporary register.
1344 //
1345 // To support position-independent use cases, the offset of the target
1346 // function is stored as a relative offset (which will expand into an
1347 // R_ARM_REL32 relocation in ELF, and presumably the equivalent in other
1348 // object file types), and added to pc after we load it. (The alternative
1349 // B.W is automatically pc-relative.)
1350 //
1351 // There are five 16-bit Thumb instructions here, so the .balign 4 adds a
1352 // sixth halfword of padding, and then the offset consumes a further 4
1353 // bytes, for a total of 16, which is very convenient since entries in
1354 // this jump table need to have power-of-two size.
1355 AsmOS << "push {r0,r1}\n"
1356 << "ldr r0, 1f\n"
1357 << "0: add r0, r0, pc\n"
1358 << "str r0, [sp, #4]\n"
1359 << "pop {r0,pc}\n"
1360 << ".balign 4\n"
1361 << "1: .word $0 - (0b + 4)\n";
1362 } else {
1363 if (hasBranchTargetEnforcement())
1364 AsmOS << "bti\n";
1365 AsmOS << "b.w $0\n";
1366 }
1367 } else if (JumpTableArch == Triple::riscv32 ||
1368 JumpTableArch == Triple::riscv64) {
1369 AsmOS << "tail $0@plt\n";
1370 } else if (JumpTableArch == Triple::loongarch64) {
1371 AsmOS << "pcalau12i $$t0, %pc_hi20($0)\n"
1372 << "jirl $$r0, $$t0, %pc_lo12($0)\n";
1373 } else if (JumpTableArch == Triple::hexagon) {
1374 AsmOS << "jump $0\n";
1375 } else {
1376 report_fatal_error("Unsupported architecture for jump tables");
1377 }
1378
// hasSideEffects=true keeps the entry from being optimized away.
1379 return InlineAsm::get(
1380 FunctionType::get(Type::getVoidTy(M.getContext()), PtrTy, false),
1381 AsmOS.str(), "s",
1382 /*hasSideEffects=*/true);
1383 }
1384
1385 /// Given a disjoint set of type identifiers and functions, build the bit sets
1386 /// and lower the llvm.type.test calls, architecture dependently.
// Dispatches to the jump-table based native path or the table-index based
// WebAssembly path; any other architecture is a hard error.
1387 void LowerTypeTestsModule::buildBitSetsFromFunctions(
// NOTE(review): original source line 1388 (the parameter list -- presumably
// the TypeIds and Functions arrays used below) is missing from this extract.
1389 if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
1390 Arch == Triple::thumb || Arch == Triple::aarch64 ||
1391 Arch == Triple::riscv32 || Arch == Triple::riscv64 ||
1392 Arch == Triple::loongarch64 || Arch == Triple::hexagon)
1393 buildBitSetsFromFunctionsNative(TypeIds, Functions);
1394 else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
1395 buildBitSetsFromFunctionsWASM(TypeIds, Functions);
1396 else
1397 report_fatal_error("Unsupported architecture for jump tables");
1398 }
1399
// Move GV's static initializer into a lazily-created module constructor
// (__cfi_global_var_init), because the initializer is about to reference an
// expression that cannot appear in a constant initializer (see
// replaceWeakDeclarationWithJumpTablePtr).
1400 void LowerTypeTestsModule::moveInitializerToModuleConstructor(
1401 GlobalVariable *GV) {
1402 if (WeakInitializerFn == nullptr) {
1403 WeakInitializerFn = Function::Create(
1404 FunctionType::get(Type::getVoidTy(M.getContext()),
1405 /* IsVarArg */ false),
// NOTE(review): original source line 1406 (presumably the linkage argument of
// this Function::Create call) is missing from this extract.
1407 M.getDataLayout().getProgramAddressSpace(),
1408 "__cfi_global_var_init", &M);
1409 BasicBlock *BB =
1410 BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
1411 ReturnInst::Create(M.getContext(), BB);
1412 WeakInitializerFn->setSection(
1413 ObjectFormat == Triple::MachO
1414 ? "__TEXT,__StaticInit,regular,pure_instructions"
1415 : ".text.startup");
1416 // This code is equivalent to relocation application, and should run at the
1417 // earliest possible time (i.e. with the highest priority).
1418 appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
1419 }
1420
// Store the old initializer at runtime, just before the constructor returns.
1421 IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
1422 GV->setConstant(false);
1423 IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
// NOTE(review): original source line 1424 (presumably the statement replacing
// GV's static initializer, now that it is applied at runtime) is missing from
// this extract.
1425 }
1426
1427void LowerTypeTestsModule::findGlobalVariableUsersOf(
1428 Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
1429 for (auto *U : C->users()){
1430 if (auto *GV = dyn_cast<GlobalVariable>(U))
1431 Out.insert(GV);
1432 else if (auto *C2 = dyn_cast<Constant>(U))
1433 findGlobalVariableUsersOf(C2, Out);
1434 }
1435}
1436
1437 // Replace all uses of F with (F ? JT : 0).
// F is external-weak, so it may resolve to null at link time; each use must
// therefore test F for null at runtime before substituting the jump table
// pointer JT.
1438 void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
1439 Function *F, Constant *JT, bool IsJumpTableCanonical) {
1440 // The target expression can not appear in a constant initializer on most
1441 // (all?) targets. Switch to a runtime initializer.
1442 SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
1443 findGlobalVariableUsersOf(F, GlobalVarUsers);
1444 for (auto *GV : GlobalVarUsers) {
1445 if (GV == GlobalAnnotation)
1446 continue;
1447 moveInitializerToModuleConstructor(GV);
1448 }
1449
1450 // Can not RAUW F with an expression that uses F. Replace with a temporary
1451 // placeholder first.
1452 Function *PlaceholderFn =
// NOTE(review): original source lines 1453 (the Function::Create call head for
// the placeholder), 1457 (a comment line) and 1473 (the statement guarded by
// the "if (auto *SI = ...)" below) were lost in extraction -- restore from
// upstream LowerTypeTests.cpp.
1454 F->getAddressSpace(), "", &M);
1455 replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);
1456
1458 // Don't use range based loop, because use list will be modified.
1459 while (!PlaceholderFn->use_empty()) {
1460 Use &U = *PlaceholderFn->use_begin();
1461 auto *InsertPt = dyn_cast<Instruction>(U.getUser());
1462 assert(InsertPt && "Non-instruction users should have been eliminated");
// For a PHI use, the select must be materialized in the incoming block, not
// next to the PHI itself.
1463 auto *PN = dyn_cast<PHINode>(InsertPt);
1464 if (PN)
1465 InsertPt = PN->getIncomingBlock(U)->getTerminator();
1466 IRBuilder Builder(InsertPt);
1467 Value *ICmp = Builder.CreateICmp(CmpInst::ICMP_NE, F,
1468 Constant::getNullValue(F->getType()));
1469 Value *Select = Builder.CreateSelect(ICmp, JT,
1470 Constant::getNullValue(F->getType()));
1471
1472 if (auto *SI = dyn_cast<SelectInst>(Select))
1474 // For phi nodes, we need to update the incoming value for all operands
1475 // with the same predecessor.
1476 if (PN)
1477 PN->setIncomingValueForBlock(InsertPt->getParent(), Select);
1478 else
1479 U.set(Select);
1480 }
1481 PlaceholderFn->eraseFromParent();
1482 }
1483
1484static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
1485 Attribute TFAttr = F->getFnAttribute("target-features");
1486 if (TFAttr.isValid()) {
1488 TFAttr.getValueAsString().split(Features, ',');
1489 for (StringRef Feature : Features) {
1490 if (Feature == "-thumb-mode")
1491 return false;
1492 else if (Feature == "+thumb-mode")
1493 return true;
1494 }
1495 }
1496
1497 return ModuleArch == Triple::thumb;
1498}
1499
1500// Each jump table must be either ARM or Thumb as a whole for the bit-test math
1501// to work. Pick one that matches the majority of members to minimize interop
1502// veneers inserted by the linker.
1503Triple::ArchType LowerTypeTestsModule::selectJumpTableArmEncoding(
1504 ArrayRef<GlobalTypeMember *> Functions) {
1505 if (Arch != Triple::arm && Arch != Triple::thumb)
1506 return Arch;
1507
1508 if (!CanUseThumbBWJumpTable && CanUseArmJumpTable) {
1509 // In architectures that provide Arm and Thumb-1 but not Thumb-2,
1510 // we should always prefer the Arm jump table format, because the
1511 // Thumb-1 one is larger and slower.
1512 return Triple::arm;
1513 }
1514
1515 // Otherwise, go with majority vote.
1516 unsigned ArmCount = 0, ThumbCount = 0;
1517 for (const auto GTM : Functions) {
1518 if (!GTM->isJumpTableCanonical()) {
1519 // PLT stubs are always ARM.
1520 // FIXME: This is the wrong heuristic for non-canonical jump tables.
1521 ++ArmCount;
1522 continue;
1523 }
1524
1525 Function *F = cast<Function>(GTM->getGlobal());
1526 ++(isThumbFunction(F, Arch) ? ThumbCount : ArmCount);
1527 }
1528
1529 return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
1530}
1531
1532 // Create location for each function entry which should look like this:
1533 // frame #0: c::c() (.cfi_jt) at sanitizer/ubsan_interface.h:0:0
1534 // frame #1: __ubsan_check_cfi_icall_jt at sanitizer/ubsan_interface.h:0
// NOTE(review): original source lines 1535-1536 (this function's signature --
// presumably createJumpTableDebugInfo(Function *F,
// ArrayRef<GlobalTypeMember *> Functions)) were lost in extraction.
1537 Module &M = *F->getParent();
// Reuse an existing compile unit if the module has one.
1538 DICompileUnit *CU = nullptr;
1539 auto CUs = M.debug_compile_units();
1540 if (!CUs.empty())
1541 CU = *CUs.begin();
1542
1543 DIBuilder DIB(M, /*AllowUnresolved=*/true, CU);
1544 DIFile *File = DIB.createFile("ubsan_interface.h", "sanitizer");
1545 if (!CU) {
1546 // Synthetic module (like ld-temp.o), it frequently lacks a DICompileUnit
1547 // even if the rest of the program has debug info.
1548 CU = DIB.createCompileUnit(
1549 DISourceLanguageName(dwarf::DW_LANG_C), File, "llvm", true, "", 0, "",
// NOTE(review): original source line 1550 (the trailing arguments of this
// createCompileUnit call) is missing from this extract.
1551 }
1552
1553 DISubroutineType *DIFnTy = DIB.createSubroutineType(nullptr);
1554
// Synthetic subprogram for the jump table itself; every entry's location
// inlines into it so backtraces show __ubsan_check_cfi_icall_jt.
1555 DISubprogram *UbsanSP = DIB.createFunction(
1556 CU, "__ubsan_check_cfi_icall_jt", {}, File, 0, DIFnTy, 0,
1557 DINode::FlagArtificial, DISubprogram::SPFlagDefinition);
1558
1559 F->setSubprogram(UbsanSP);
1560
1561 DILocation *UbsanLoc = DILocation::get(M.getContext(), 0, 0, UbsanSP);
1562
1563 SmallVector<DILocation *> Locations;
1564 Locations.reserve(Functions.size());
1565
// One "<name>.cfi_jt" subprogram per jump table entry, with its location
// nested under the ubsan frame above.
1566 for (auto *Func : Functions) {
1567 StringRef FuncName = Func->getGlobal()->getName();
1568 FuncName.consume_back(".cfi");
1569 DISubprogram *JumpSP = DIB.createFunction(
1570 CU, (FuncName + ".cfi_jt").str(), {}, File, 0, DIFnTy, 0,
1571 DINode::FlagArtificial, DISubprogram::SPFlagDefinition);
1572
1573 DILocation *EntryLoc =
1574 DILocation::get(M.getContext(), 0, 0, JumpSP, UbsanLoc);
1575
1576 Locations.push_back(EntryLoc);
1577 }
1578
1579 DIB.finalize();
1580
1581 return Locations;
1582 }
1583
// Fill in the body of the jump table function F: one inline-asm branch entry
// per member of Functions, plus the function attributes needed so the
// assembler/linker leave the table's layout intact.
1584 void LowerTypeTestsModule::createJumpTable(
1585 Function *F, ArrayRef<GlobalTypeMember *> Functions,
1586 Triple::ArchType JumpTableArch) {
1587 BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1588 IRBuilder<> IRB(BB);
1589
// NOTE(review): original source line 1590 (presumably the declaration of the
// Locations vector assigned below) is missing from this extract.
1591 if (M.getDwarfVersion() != 0 && EnableJumpTableDebugInfo)
1592 Locations = createJumpTableDebugInfo(F, Functions);
1593
1594 InlineAsm *JumpTableAsm = createJumpTableEntryAsm(JumpTableArch);
1595
1596 // Check if all entries have the NoUnwind attribute.
1597 // If all entries have it, we can safely mark the
1598 // cfi.jumptable as NoUnwind, otherwise, direct calls
1599 // to the jump table will not handle exceptions properly
1600 bool areAllEntriesNounwind = true;
1601 assert(Locations.empty() || Functions.size() == Locations.size());
1602 for (auto [GTM, Loc] : zip_longest(Functions, Locations)) {
1603 if (Loc.has_value())
1604 IRB.SetCurrentDebugLocation(*Loc);
1605 if (!cast<Function>((*GTM)->getGlobal())
1606 ->hasFnAttribute(Attribute::NoUnwind)) {
1607 areAllEntriesNounwind = false;
1608 }
1609 IRB.CreateCall(JumpTableAsm, (*GTM)->getGlobal());
1610 }
// The block never falls through; the asm entries are the whole body.
1611 IRB.CreateUnreachable();
1612
1613 // Align the whole table by entry size.
1614 F->setAlignment(Align(getJumpTableEntrySize(JumpTableArch)));
// Naked: no prologue/epilogue may be inserted between entries.
1615 F->addFnAttr(Attribute::Naked);
1616 if (JumpTableArch == Triple::arm)
1617 F->addFnAttr("target-features", "-thumb-mode");
1618 if (JumpTableArch == Triple::thumb) {
1619 if (hasBranchTargetEnforcement()) {
1620 // If we're generating a Thumb jump table with BTI, add a target-features
1621 // setting to ensure BTI can be assembled.
1622 F->addFnAttr("target-features", "+thumb-mode,+pacbti");
1623 } else {
1624 F->addFnAttr("target-features", "+thumb-mode");
1625 if (CanUseThumbBWJumpTable) {
1626 // Thumb jump table assembly needs Thumb2. The following attribute is
1627 // added by Clang for -march=armv7.
1628 F->addFnAttr("target-cpu", "cortex-a8");
1629 }
1630 }
1631 }
1632 // When -mbranch-protection= is used, the inline asm adds a BTI. Suppress BTI
1633 // for the function to avoid double BTI. This is a no-op without
1634 // -mbranch-protection=.
1635 if (JumpTableArch == Triple::aarch64 || JumpTableArch == Triple::thumb) {
1636 if (F->hasFnAttribute("branch-target-enforcement"))
1637 F->removeFnAttr("branch-target-enforcement");
1638 if (F->hasFnAttribute("sign-return-address"))
1639 F->removeFnAttr("sign-return-address");
1640 }
1641 if (JumpTableArch == Triple::riscv32 || JumpTableArch == Triple::riscv64) {
1642 // Make sure the jump table assembly is not modified by the assembler or
1643 // the linker.
1644 F->addFnAttr("target-features", "-c,-relax");
1645 }
1646 // When -fcf-protection= is used, the inline asm adds an ENDBR. Suppress ENDBR
1647 // for the function to avoid double ENDBR. This is a no-op without
1648 // -fcf-protection=.
1649 if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64)
1650 F->addFnAttr(Attribute::NoCfCheck);
1651
1652 // Make sure we don't emit .eh_frame for this function if it isn't needed.
1653 if (areAllEntriesNounwind)
1654 F->addFnAttr(Attribute::NoUnwind);
1655
1656 // Make sure we do not inline any calls to the cfi.jumptable.
1657 F->addFnAttr(Attribute::NoInline);
1658 }
1659
1660 /// Given a disjoint set of type identifiers and functions, build a jump table
1661 /// for the functions, build the bit sets and lower the llvm.type.test calls.
1662 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
// NOTE(review): original source line 1663 (the parameter list -- presumably
// ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) is
// missing from this extract.
1664 // Unlike the global bitset builder, the function bitset builder cannot
1665 // re-arrange functions in a particular order and base its calculations on the
1666 // layout of the functions' entry points, as we have no idea how large a
1667 // particular function will end up being (the size could even depend on what
1668 // this pass does!) Instead, we build a jump table, which is a block of code
1669 // consisting of one branch instruction for each of the functions in the bit
1670 // set that branches to the target function, and redirect any taken function
1671 // addresses to the corresponding jump table entry. In the object file's
1672 // symbol table, the symbols for the target functions also refer to the jump
1673 // table entries, so that addresses taken outside the module will pass any
1674 // verification done inside the module.
1675 //
1676 // In more concrete terms, suppose we have three functions f, g, h which are
1677 // of the same type, and a function foo that returns their addresses:
1678 //
1679 // f:
1680 // mov 0, %eax
1681 // ret
1682 //
1683 // g:
1684 // mov 1, %eax
1685 // ret
1686 //
1687 // h:
1688 // mov 2, %eax
1689 // ret
1690 //
1691 // foo:
1692 // mov f, %eax
1693 // mov g, %edx
1694 // mov h, %ecx
1695 // ret
1696 //
1697 // We output the jump table as module-level inline asm string. The end result
1698 // will (conceptually) look like this:
1699 //
1700 // f = .cfi.jumptable
1701 // g = .cfi.jumptable + 4
1702 // h = .cfi.jumptable + 8
1703 // .cfi.jumptable:
1704 // jmp f.cfi ; 5 bytes
1705 // int3 ; 1 byte
1706 // int3 ; 1 byte
1707 // int3 ; 1 byte
1708 // jmp g.cfi ; 5 bytes
1709 // int3 ; 1 byte
1710 // int3 ; 1 byte
1711 // int3 ; 1 byte
1712 // jmp h.cfi ; 5 bytes
1713 // int3 ; 1 byte
1714 // int3 ; 1 byte
1715 // int3 ; 1 byte
1716 //
1717 // f.cfi:
1718 // mov 0, %eax
1719 // ret
1720 //
1721 // g.cfi:
1722 // mov 1, %eax
1723 // ret
1724 //
1725 // h.cfi:
1726 // mov 2, %eax
1727 // ret
1728 //
1729 // foo:
1730 // mov f, %eax
1731 // mov g, %edx
1732 // mov h, %ecx
1733 // ret
1734 //
1735 // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1736 // normal case the check can be carried out using the same kind of simple
1737 // arithmetic that we normally use for globals.
1738
1739 // FIXME: find a better way to represent the jumptable in the IR.
1740 assert(!Functions.empty());
1741
1742 // Decide on the jump table encoding, so that we know how big the
1743 // entries will be.
1744 Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions);
1745
1746 // Build a simple layout based on the regular layout of jump tables.
1747 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1748 unsigned EntrySize = getJumpTableEntrySize(JumpTableArch);
1749 for (unsigned I = 0; I != Functions.size(); ++I)
1750 GlobalLayout[Functions[I]] = I * EntrySize;
1751
1752 Function *JumpTableFn =
// NOTE(review): original source lines 1753 and 1755 (the Function::Create
// call head -- presumably the void function type and linkage arguments) were
// lost in extraction.
1754 /* IsVarArg */ false),
1756 M.getDataLayout().getProgramAddressSpace(),
1757 ".cfi.jumptable", &M);
1758 ArrayType *JumpTableEntryType = ArrayType::get(Int8Ty, EntrySize);
// NOTE(review): original source lines 1759 and 1761 (presumably the
// declaration of JumpTableType and the head of the pointer cast producing
// JumpTable from JumpTableFn) are missing from this extract.
1760 ArrayType::get(JumpTableEntryType, Functions.size());
1762 JumpTableFn, PointerType::getUnqual(M.getContext()));
1763
1764 lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1765
1766 // Build aliases pointing to offsets into the jump table, and replace
1767 // references to the original functions with references to the aliases.
1768 for (unsigned I = 0; I != Functions.size(); ++I) {
1769 Function *F = cast<Function>(Functions[I]->getGlobal());
1770 bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1771
// Address of entry I: &JumpTable[0][I] (each entry is an [EntrySize x i8]).
1772 Constant *CombinedGlobalElemPtr = ConstantExpr::getInBoundsGetElementPtr(
1773 JumpTableType, JumpTable,
1774 ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1775 ConstantInt::get(IntPtrTy, I)});
1776
1777 const bool IsExported = Functions[I]->isExported();
1778 if (!IsJumpTableCanonical) {
// NOTE(review): original source lines 1779-1780 (presumably computing the
// linkage LT used below from IsExported) and 1785 (the statement taken when
// IsExported holds) are missing from this extract.
1781 GlobalAlias *JtAlias = GlobalAlias::create(JumpTableEntryType, 0, LT,
1782 F->getName() + ".cfi_jt",
1783 CombinedGlobalElemPtr, &M);
1784 if (IsExported)
1786 else
1787 appendToUsed(M, {JtAlias});
1788 }
1789
1790 if (IsExported) {
1791 if (IsJumpTableCanonical)
1792 ExportSummary->cfiFunctionDefs().emplace(F->getName());
1793 else
1794 ExportSummary->cfiFunctionDecls().emplace(F->getName());
1795 }
1796
1797 if (!IsJumpTableCanonical) {
1798 if (F->hasExternalWeakLinkage())
1799 replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1800 IsJumpTableCanonical);
1801 else
1802 replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1803 } else {
1804 assert(F->getType()->getAddressSpace() == 0);
1805
// Canonical case: the original symbol name now aliases the jump table entry,
// and the real body lives on under "<name>.cfi".
1806 GlobalAlias *FAlias =
1807 GlobalAlias::create(JumpTableEntryType, 0, F->getLinkage(), "",
1808 CombinedGlobalElemPtr, &M);
1809 FAlias->setVisibility(F->getVisibility());
1810 FAlias->takeName(F);
1811 if (FAlias->hasName()) {
1812 F->setName(FAlias->getName() + ".cfi");
1813 maybeReplaceComdat(F, FAlias->getName());
1814 }
1815 replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1816 if (!F->hasLocalLinkage())
1817 F->setVisibility(GlobalVariable::HiddenVisibility);
1818 }
1819 }
1820
1821 createJumpTable(JumpTableFn, Functions, JumpTableArch);
1822 }
1823
1824/// Assign a dummy layout using an incrementing counter, tag each function
1825/// with its index represented as metadata, and lower each type test to an
1826/// integer range comparison. During generation of the indirect function call
1827/// table in the backend, it will assign the given indexes.
1828/// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1829/// been finalized.
1830void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1832 assert(!Functions.empty());
1833
1834 // Build consecutive monotonic integer ranges for each call target set
1835 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1836
1837 for (GlobalTypeMember *GTM : Functions) {
1838 Function *F = cast<Function>(GTM->getGlobal());
1839
1840 // Skip functions that are not address taken, to avoid bloating the table
1841 if (!F->hasAddressTaken())
1842 continue;
1843
1844 // Store metadata with the index for each function
1845 MDNode *MD = MDNode::get(F->getContext(),
1847 ConstantInt::get(Int64Ty, IndirectIndex))));
1848 F->setMetadata("wasm.index", MD);
1849
1850 // Assign the counter value
1851 GlobalLayout[GTM] = IndirectIndex++;
1852 }
1853
1854 // The indirect function table index space starts at zero, so pass a NULL
1855 // pointer as the subtracted "jump table" offset.
1856 lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(PtrTy),
1857 GlobalLayout);
1858}
1859
1860void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1862 ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1863 DenseMap<Metadata *, uint64_t> TypeIdIndices;
1864 for (unsigned I = 0; I != TypeIds.size(); ++I)
1865 TypeIdIndices[TypeIds[I]] = I;
1866
1867 // For each type identifier, build a set of indices that refer to members of
1868 // the type identifier.
1869 std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1870 unsigned GlobalIndex = 0;
1871 DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1872 for (GlobalTypeMember *GTM : Globals) {
1873 for (MDNode *Type : GTM->types()) {
1874 // Type = { offset, type identifier }
1875 auto I = TypeIdIndices.find(Type->getOperand(1));
1876 if (I != TypeIdIndices.end())
1877 TypeMembers[I->second].insert(GlobalIndex);
1878 }
1879 GlobalIndices[GTM] = GlobalIndex;
1880 GlobalIndex++;
1881 }
1882
1883 for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1884 TypeMembers.emplace_back();
1885 std::set<uint64_t> &TMSet = TypeMembers.back();
1886 for (GlobalTypeMember *T : JT->targets())
1887 TMSet.insert(GlobalIndices[T]);
1888 }
1889
1890 // Order the sets of indices by size. The GlobalLayoutBuilder works best
1891 // when given small index sets first.
1892 llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1893 const std::set<uint64_t> &O2) {
1894 return O1.size() < O2.size();
1895 });
1896
1897 // Create a GlobalLayoutBuilder and provide it with index sets as layout
1898 // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1899 // close together as possible.
1900 GlobalLayoutBuilder GLB(Globals.size());
1901 for (auto &&MemSet : TypeMembers)
1902 GLB.addFragment(MemSet);
1903
1904 // Build a vector of globals with the computed layout.
1905 bool IsGlobalSet =
1906 Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1907 std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1908 auto OGTMI = OrderedGTMs.begin();
1909 for (auto &&F : GLB.Fragments) {
1910 for (auto &&Offset : F) {
1911 if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1912 report_fatal_error("Type identifier may not contain both global "
1913 "variables and functions");
1914 *OGTMI++ = Globals[Offset];
1915 }
1916 }
1917
1918 // Build the bitsets from this disjoint set.
1919 if (IsGlobalSet)
1920 buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1921 else
1922 buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1923}
1924
1925/// Lower all type tests in this module.
1926LowerTypeTestsModule::LowerTypeTestsModule(
1927 Module &M, ModuleAnalysisManager &AM, ModuleSummaryIndex *ExportSummary,
1928 const ModuleSummaryIndex *ImportSummary)
1929 : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary) {
1930 assert(!(ExportSummary && ImportSummary));
1931 Triple TargetTriple(M.getTargetTriple());
1932 Arch = TargetTriple.getArch();
1933 if (Arch == Triple::arm)
1934 CanUseArmJumpTable = true;
1935 if (Arch == Triple::arm || Arch == Triple::thumb) {
1936 auto &FAM =
1938 for (Function &F : M) {
1939 // Skip declarations since we should not query the TTI for them.
1940 if (F.isDeclaration())
1941 continue;
1942 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
1943 if (TTI.hasArmWideBranch(false))
1944 CanUseArmJumpTable = true;
1945 if (TTI.hasArmWideBranch(true))
1946 CanUseThumbBWJumpTable = true;
1947 }
1948 }
1949 OS = TargetTriple.getOS();
1950 ObjectFormat = TargetTriple.getObjectFormat();
1951
1952 // Function annotation describes or applies to function itself, and
1953 // shouldn't be associated with jump table thunk generated for CFI.
1954 GlobalAnnotation = M.getGlobalVariable("llvm.global.annotations");
1955 if (GlobalAnnotation && GlobalAnnotation->hasInitializer()) {
1956 const ConstantArray *CA =
1957 cast<ConstantArray>(GlobalAnnotation->getInitializer());
1958 FunctionAnnotations.insert_range(CA->operands());
1959 }
1960}
1961
1962bool LowerTypeTestsModule::runForTesting(Module &M, ModuleAnalysisManager &AM) {
1963 ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1964
1965 // Handle the command-line summary arguments. This code is for testing
1966 // purposes only, so we handle errors directly.
1967 if (!ClReadSummary.empty()) {
1968 ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1969 ": ");
1970 auto ReadSummaryFile = ExitOnErr(errorOrToExpected(
1971 MemoryBuffer::getFile(ClReadSummary, /*IsText=*/true)));
1972
1973 yaml::Input In(ReadSummaryFile->getBuffer());
1974 In >> Summary;
1975 ExitOnErr(errorCodeToError(In.error()));
1976 }
1977
1978 bool Changed =
1979 LowerTypeTestsModule(
1980 M, AM,
1981 ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1982 ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr)
1983 .lower();
1984
1985 if (!ClWriteSummary.empty()) {
1986 ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1987 ": ");
1988 std::error_code EC;
1989 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1990 ExitOnErr(errorCodeToError(EC));
1991
1992 yaml::Output Out(OS);
1993 Out << Summary;
1994 }
1995
1996 return Changed;
1997}
1998
1999static bool isDirectCall(Use& U) {
2000 auto *Usr = dyn_cast<CallInst>(U.getUser());
2001 return Usr && Usr->isCallee(&U);
2002}
2003
2004void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
2005 bool IsJumpTableCanonical) {
2006 SmallSetVector<Constant *, 4> Constants;
2007 for (Use &U : llvm::make_early_inc_range(Old->uses())) {
2008 // Skip no_cfi values, which refer to the function body instead of the jump
2009 // table.
2010 if (isa<NoCFIValue>(U.getUser()))
2011 continue;
2012
2013 // Skip direct calls to externally defined or non-dso_local functions.
2014 if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
2015 continue;
2016
2017 // Skip function annotation.
2018 if (isFunctionAnnotation(U.getUser()))
2019 continue;
2020
2021 // Must handle Constants specially, we cannot call replaceUsesOfWith on a
2022 // constant because they are uniqued.
2023 if (auto *C = dyn_cast<Constant>(U.getUser())) {
2024 if (!isa<GlobalValue>(C)) {
2025 // Save unique users to avoid processing operand replacement
2026 // more than once.
2027 Constants.insert(C);
2028 continue;
2029 }
2030 }
2031
2032 U.set(New);
2033 }
2034
2035 // Process operand replacement of saved constants.
2036 for (auto *C : Constants)
2037 C->handleOperandChange(Old, New);
2038}
2039
2040void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
2042}
2043
// Erase every call to TypeTestFunc (llvm.type.test or llvm.public.type.test)
// in M, together with the llvm.assume intrinsics consuming the results. Any
// remaining uses of a call's result are replaced with "true".
static void dropTypeTests(Module &M, Function &TypeTestFunc,
                          bool ShouldDropAll) {
  // early_inc_range: we erase the current call while iterating the use list.
  for (Use &U : llvm::make_early_inc_range(TypeTestFunc.uses())) {
    auto *CI = cast<CallInst>(U.getUser());
    // Find and erase llvm.assume intrinsics for this llvm.type.test call.
    for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
      if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
        Assume->eraseFromParent();
    // If the assume was merged with another assume, we might have a use on a
    // phi or select (which will feed the assume). Simply replace the use on
    // the phi/select with "true" and leave the merged assume.
    //
    // If ShouldDropAll is set, then we need to update any remaining uses,
    // regardless of the instruction type.
    if (!CI->use_empty()) {
      assert(ShouldDropAll || all_of(CI->users(), [](User *U) -> bool {
               return isa<PHINode>(U) || isa<SelectInst>(U);
             }));
      CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
    }
    CI->eraseFromParent();
  }
}
2067
2068static bool dropTypeTests(Module &M, bool ShouldDropAll) {
2069 Function *TypeTestFunc =
2070 Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_test);
2071 if (TypeTestFunc)
2072 dropTypeTests(M, *TypeTestFunc, ShouldDropAll);
2073 // Normally we'd have already removed all @llvm.public.type.test calls,
2074 // except for in the case where we originally were performing ThinLTO but
2075 // decided not to in the backend.
2076 Function *PublicTypeTestFunc =
2077 Intrinsic::getDeclarationIfExists(&M, Intrinsic::public_type_test);
2078 if (PublicTypeTestFunc)
2079 dropTypeTests(M, *PublicTypeTestFunc, ShouldDropAll);
2080 if (TypeTestFunc || PublicTypeTestFunc) {
2081 // We have deleted the type intrinsics, so we no longer have enough
2082 // information to reason about the liveness of virtual function pointers
2083 // in GlobalDCE.
2084 for (GlobalVariable &GV : M.globals())
2085 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2086 return true;
2087 }
2088 return false;
2089}
2090
2091bool LowerTypeTestsModule::lower() {
2092 Function *TypeTestFunc =
2093 Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_test);
2094
2095 // If only some of the modules were split, we cannot correctly perform
2096 // this transformation. We already checked for the presense of type tests
2097 // with partially split modules during the thin link, and would have emitted
2098 // an error if any were found, so here we can simply return.
2099 if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
2100 (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
2101 return false;
2102
2103 Function *ICallBranchFunnelFunc =
2104 Intrinsic::getDeclarationIfExists(&M, Intrinsic::icall_branch_funnel);
2105 if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
2106 (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
2107 !ExportSummary && !ImportSummary)
2108 return false;
2109
2110 if (ImportSummary) {
2111 if (TypeTestFunc)
2112 for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses()))
2113 importTypeTest(cast<CallInst>(U.getUser()));
2114
2115 if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
2117 "unexpected call to llvm.icall.branch.funnel during import phase");
2118
2121 for (auto &F : M) {
2122 // CFI functions are either external, or promoted. A local function may
2123 // have the same name, but it's not the one we are looking for.
2124 if (F.hasLocalLinkage())
2125 continue;
2126 if (ImportSummary->cfiFunctionDefs().count(F.getName()))
2127 Defs.push_back(&F);
2128 else if (ImportSummary->cfiFunctionDecls().count(F.getName()))
2129 Decls.push_back(&F);
2130 }
2131
2132 {
2133 ScopedSaveAliaseesAndUsed S(M);
2134 for (auto *F : Defs)
2135 importFunction(F, /*isJumpTableCanonical*/ true);
2136 for (auto *F : Decls)
2137 importFunction(F, /*isJumpTableCanonical*/ false);
2138 }
2139
2140 return true;
2141 }
2142
2143 // Equivalence class set containing type identifiers and the globals that
2144 // reference them. This is used to partition the set of type identifiers in
2145 // the module into disjoint sets.
2146 using GlobalClassesTy = EquivalenceClasses<
2147 PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
2148 GlobalClassesTy GlobalClasses;
2149
2150 // Verify the type metadata and build a few data structures to let us
2151 // efficiently enumerate the type identifiers associated with a global:
2152 // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
2153 // of associated type metadata) and a mapping from type identifiers to their
2154 // list of GlobalTypeMembers and last observed index in the list of globals.
2155 // The indices will be used later to deterministically order the list of type
2156 // identifiers.
2158 struct TIInfo {
2159 unsigned UniqueId;
2160 std::vector<GlobalTypeMember *> RefGlobals;
2161 };
2162 DenseMap<Metadata *, TIInfo> TypeIdInfo;
2163 unsigned CurUniqueId = 0;
2165
2166 // Cross-DSO CFI emits jumptable entries for exported functions as well as
2167 // address taken functions in case they are address taken in other modules.
2168 const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
2169
2170 struct ExportedFunctionInfo {
2172 MDNode *FuncMD; // {name, linkage, type[, type...]}
2173 };
2174 MapVector<StringRef, ExportedFunctionInfo> ExportedFunctions;
2175 if (ExportSummary) {
2176 NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
2177 if (CfiFunctionsMD) {
2178 // A set of all functions that are address taken by a live global object.
2179 DenseSet<GlobalValue::GUID> AddressTaken;
2180 for (auto &I : *ExportSummary)
2181 for (auto &GVS : I.second.getSummaryList())
2182 if (GVS->isLive())
2183 for (const auto &Ref : GVS->refs()) {
2184 AddressTaken.insert(Ref.getGUID());
2185 for (auto &RefGVS : Ref.getSummaryList())
2186 if (auto Alias = dyn_cast<AliasSummary>(RefGVS.get()))
2187 AddressTaken.insert(Alias->getAliaseeGUID());
2188 }
2190 if (AddressTaken.count(GUID))
2191 return true;
2192 auto VI = ExportSummary->getValueInfo(GUID);
2193 if (!VI)
2194 return false;
2195 for (auto &I : VI.getSummaryList())
2196 if (auto Alias = dyn_cast<AliasSummary>(I.get()))
2197 if (AddressTaken.count(Alias->getAliaseeGUID()))
2198 return true;
2199 return false;
2200 };
2201 for (auto *FuncMD : CfiFunctionsMD->operands()) {
2202 assert(FuncMD->getNumOperands() >= 2);
2203 StringRef FunctionName =
2204 cast<MDString>(FuncMD->getOperand(0))->getString();
2206 cast<ConstantAsMetadata>(FuncMD->getOperand(1))
2207 ->getValue()
2208 ->getUniqueInteger()
2209 .getZExtValue());
2210 const GlobalValue::GUID GUID =
2213 // Do not emit jumptable entries for functions that are not-live and
2214 // have no live references (and are not exported with cross-DSO CFI.)
2215 if (!ExportSummary->isGUIDLive(GUID))
2216 continue;
2217 if (!IsAddressTaken(GUID)) {
2218 if (!CrossDsoCfi || Linkage != CFL_Definition)
2219 continue;
2220
2221 bool Exported = false;
2222 if (auto VI = ExportSummary->getValueInfo(GUID))
2223 for (const auto &GVS : VI.getSummaryList())
2224 if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
2225 Exported = true;
2226
2227 if (!Exported)
2228 continue;
2229 }
2230 auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
2231 if (!P.second && P.first->second.Linkage != CFL_Definition)
2232 P.first->second = {Linkage, FuncMD};
2233 }
2234
2235 for (const auto &P : ExportedFunctions) {
2236 StringRef FunctionName = P.first;
2237 CfiFunctionLinkage Linkage = P.second.Linkage;
2238 MDNode *FuncMD = P.second.FuncMD;
2239 Function *F = M.getFunction(FunctionName);
2240 if (F && F->hasLocalLinkage()) {
2241 // Locally defined function that happens to have the same name as a
2242 // function defined in a ThinLTO module. Rename it to move it out of
2243 // the way of the external reference that we're about to create.
2244 // Note that setName will find a unique name for the function, so even
2245 // if there is an existing function with the suffix there won't be a
2246 // name collision.
2247 F->setName(F->getName() + ".1");
2248 F = nullptr;
2249 }
2250
2251 if (!F)
2253 FunctionType::get(Type::getVoidTy(M.getContext()), false),
2254 GlobalVariable::ExternalLinkage,
2255 M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
2256
2257 // If the function is available_externally, remove its definition so
2258 // that it is handled the same way as a declaration. Later we will try
2259 // to create an alias using this function's linkage, which will fail if
2260 // the linkage is available_externally. This will also result in us
2261 // following the code path below to replace the type metadata.
2262 if (F->hasAvailableExternallyLinkage()) {
2263 F->setLinkage(GlobalValue::ExternalLinkage);
2264 F->deleteBody();
2265 F->setComdat(nullptr);
2266 F->clearMetadata();
2267 }
2268
2269 // Update the linkage for extern_weak declarations when a definition
2270 // exists.
2271 if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
2272 F->setLinkage(GlobalValue::ExternalLinkage);
2273
2274 // If the function in the full LTO module is a declaration, replace its
2275 // type metadata with the type metadata we found in cfi.functions. That
2276 // metadata is presumed to be more accurate than the metadata attached
2277 // to the declaration.
2278 if (F->isDeclaration()) {
2281
2282 F->eraseMetadata(LLVMContext::MD_type);
2283 for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
2284 F->addMetadata(LLVMContext::MD_type,
2285 *cast<MDNode>(FuncMD->getOperand(I).get()));
2286 }
2287 }
2288 }
2289 }
2290
2291 struct AliasToCreate {
2292 Function *Alias;
2293 std::string TargetName;
2294 };
2295 std::vector<AliasToCreate> AliasesToCreate;
2296
2297 // Parse alias data to replace stand-in function declarations for aliases
2298 // with an alias to the intended target.
2299 if (ExportSummary) {
2300 if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2301 for (auto *AliasMD : AliasesMD->operands()) {
2303 for (Metadata *MD : AliasMD->operands()) {
2304 auto *MDS = dyn_cast<MDString>(MD);
2305 if (!MDS)
2306 continue;
2307 StringRef AliasName = MDS->getString();
2308 if (!ExportedFunctions.count(AliasName))
2309 continue;
2310 auto *AliasF = M.getFunction(AliasName);
2311 if (AliasF)
2312 Aliases.push_back(AliasF);
2313 }
2314
2315 if (Aliases.empty())
2316 continue;
2317
2318 for (unsigned I = 1; I != Aliases.size(); ++I) {
2319 auto *AliasF = Aliases[I];
2320 ExportedFunctions.erase(AliasF->getName());
2321 AliasesToCreate.push_back(
2322 {AliasF, std::string(Aliases[0]->getName())});
2323 }
2324 }
2325 }
2326 }
2327
2328 DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
2329 for (GlobalObject &GO : M.global_objects()) {
2331 continue;
2332
2333 Types.clear();
2334 GO.getMetadata(LLVMContext::MD_type, Types);
2335
2336 bool IsJumpTableCanonical = false;
2337 bool IsExported = false;
2338 if (Function *F = dyn_cast<Function>(&GO)) {
2339 IsJumpTableCanonical = isJumpTableCanonical(F);
2340 if (auto It = ExportedFunctions.find(F->getName());
2341 It != ExportedFunctions.end()) {
2342 IsJumpTableCanonical |= It->second.Linkage == CFL_Definition;
2343 IsExported = true;
2344 // TODO: The logic here checks only that the function is address taken,
2345 // not that the address takers are live. This can be updated to check
2346 // their liveness and emit fewer jumptable entries once monolithic LTO
2347 // builds also emit summaries.
2348 } else if (!F->hasAddressTaken()) {
2349 if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2350 continue;
2351 }
2352 }
2353
2354 auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2355 IsExported, Types);
2356 GlobalTypeMembers[&GO] = GTM;
2357 for (MDNode *Type : Types) {
2358 verifyTypeMDNode(&GO, Type);
2359 auto &Info = TypeIdInfo[Type->getOperand(1)];
2360 Info.UniqueId = ++CurUniqueId;
2361 Info.RefGlobals.push_back(GTM);
2362 }
2363 }
2364
2365 auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2366 // Add the call site to the list of call sites for this type identifier. We
2367 // also use TypeIdUsers to keep track of whether we have seen this type
2368 // identifier before. If we have, we don't need to re-add the referenced
2369 // globals to the equivalence class.
2370 auto Ins = TypeIdUsers.insert({TypeId, {}});
2371 if (Ins.second) {
2372 // Add the type identifier to the equivalence class.
2373 auto &GCI = GlobalClasses.insert(TypeId);
2374 GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2375
2376 // Add the referenced globals to the type identifier's equivalence class.
2377 for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2378 CurSet = GlobalClasses.unionSets(
2379 CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2380 }
2381
2382 return Ins.first->second;
2383 };
2384
2385 if (TypeTestFunc) {
2386 for (const Use &U : TypeTestFunc->uses()) {
2387 auto CI = cast<CallInst>(U.getUser());
2388 // If this type test is only used by llvm.assume instructions, it
2389 // was used for whole program devirtualization, and is being kept
2390 // for use by other optimization passes. We do not need or want to
2391 // lower it here. We also don't want to rewrite any associated globals
2392 // unnecessarily. These will be removed by a subsequent LTT invocation
2393 // with the DropTypeTests flag set.
2394 bool OnlyAssumeUses = !CI->use_empty();
2395 for (const Use &CIU : CI->uses()) {
2396 if (isa<AssumeInst>(CIU.getUser()))
2397 continue;
2398 OnlyAssumeUses = false;
2399 break;
2400 }
2401 if (OnlyAssumeUses)
2402 continue;
2403
2404 auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
2405 if (!TypeIdMDVal)
2406 report_fatal_error("Second argument of llvm.type.test must be metadata");
2407 auto TypeId = TypeIdMDVal->getMetadata();
2408 AddTypeIdUse(TypeId).CallSites.push_back(CI);
2409 }
2410 }
2411
2412 if (ICallBranchFunnelFunc) {
2413 for (const Use &U : ICallBranchFunnelFunc->uses()) {
2414 if (Arch != Triple::x86_64)
2416 "llvm.icall.branch.funnel not supported on this target");
2417
2418 auto CI = cast<CallInst>(U.getUser());
2419
2420 std::vector<GlobalTypeMember *> Targets;
2421 if (CI->arg_size() % 2 != 1)
2422 report_fatal_error("number of arguments should be odd");
2423
2424 GlobalClassesTy::member_iterator CurSet;
2425 for (unsigned I = 1; I != CI->arg_size(); I += 2) {
2426 int64_t Offset;
2428 CI->getOperand(I), Offset, M.getDataLayout()));
2429 if (!Base)
2431 "Expected branch funnel operand to be global value");
2432
2433 GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2434 Targets.push_back(GTM);
2435 GlobalClassesTy::member_iterator NewSet =
2436 GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2437 if (I == 1)
2438 CurSet = NewSet;
2439 else
2440 CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2441 }
2442
2443 GlobalClasses.unionSets(
2444 CurSet, GlobalClasses.findLeader(
2445 GlobalClasses.insert(ICallBranchFunnel::create(
2446 Alloc, CI, Targets, ++CurUniqueId))));
2447 }
2448 }
2449
2450 if (ExportSummary) {
2451 DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2452 for (auto &P : TypeIdInfo) {
2453 if (auto *TypeId = dyn_cast<MDString>(P.first))
2455 TypeId->getString())]
2456 .push_back(TypeId);
2457 }
2458
2459 for (auto &P : *ExportSummary) {
2460 for (auto &S : P.second.getSummaryList()) {
2461 if (!ExportSummary->isGlobalValueLive(S.get()))
2462 continue;
2463 if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2464 for (GlobalValue::GUID G : FS->type_tests())
2465 for (Metadata *MD : MetadataByGUID[G])
2466 AddTypeIdUse(MD).IsExported = true;
2467 }
2468 }
2469 }
2470
2471 if (GlobalClasses.empty())
2472 return false;
2473
2474 {
2475 ScopedSaveAliaseesAndUsed S(M);
2476 // For each disjoint set we found...
2477 for (const auto &C : GlobalClasses) {
2478 if (!C->isLeader())
2479 continue;
2480
2481 ++NumTypeIdDisjointSets;
2482 // Build the list of type identifiers in this disjoint set.
2483 std::vector<Metadata *> TypeIds;
2484 std::vector<GlobalTypeMember *> Globals;
2485 std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2486 for (auto M : GlobalClasses.members(*C)) {
2487 if (isa<Metadata *>(M))
2488 TypeIds.push_back(cast<Metadata *>(M));
2489 else if (isa<GlobalTypeMember *>(M))
2490 Globals.push_back(cast<GlobalTypeMember *>(M));
2491 else
2492 ICallBranchFunnels.push_back(cast<ICallBranchFunnel *>(M));
2493 }
2494
2495 // Order type identifiers by unique ID for determinism. This ordering is
2496 // stable as there is a one-to-one mapping between metadata and unique
2497 // IDs.
2498 llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2499 return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2500 });
2501
2502 // Same for the branch funnels.
2503 llvm::sort(ICallBranchFunnels,
2504 [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2505 return F1->UniqueId < F2->UniqueId;
2506 });
2507
2508 // Build bitsets for this disjoint set.
2509 buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2510 }
2511 }
2512
2513 allocateByteArrays();
2514
2515 for (auto A : AliasesToCreate) {
2516 auto *Target = M.getNamedValue(A.TargetName);
2517 if (!isa<GlobalAlias>(Target))
2518 continue;
2519 auto *AliasGA = GlobalAlias::create("", Target);
2520 AliasGA->setVisibility(A.Alias->getVisibility());
2521 AliasGA->setLinkage(A.Alias->getLinkage());
2522 AliasGA->takeName(A.Alias);
2523 A.Alias->replaceAllUsesWith(AliasGA);
2524 A.Alias->eraseFromParent();
2525 }
2526
2527 // Emit .symver directives for exported functions, if they exist.
2528 if (ExportSummary) {
2529 if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2530 for (auto *Symver : SymversMD->operands()) {
2531 assert(Symver->getNumOperands() >= 2);
2532 StringRef SymbolName =
2533 cast<MDString>(Symver->getOperand(0))->getString();
2534 StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2535
2536 if (!ExportedFunctions.count(SymbolName))
2537 continue;
2538
2539 M.appendModuleInlineAsm(
2540 (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2541 }
2542 }
2543 }
2544
2545 return true;
2546}
2547
2550 bool Changed;
2551 if (UseCommandLine)
2552 Changed = LowerTypeTestsModule::runForTesting(M, AM);
2553 else
2554 Changed = LowerTypeTestsModule(M, AM, ExportSummary, ImportSummary).lower();
2555 if (!Changed)
2556 return PreservedAnalyses::all();
2557 return PreservedAnalyses::none();
2558}
2559
2561 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
2562 static_cast<PassInfoMixin<DropTypeTestsPass> *>(this)->printPipeline(
2563 OS, MapClassName2PassName);
2564 OS << '<';
2565 switch (Kind) {
2566 case DropTestKind::Assume:
2567 OS << "assume";
2568 break;
2569 case DropTestKind::All:
2570 OS << "all";
2571 break;
2572 }
2573 OS << '>';
2574}
2575
2580
2583 bool Changed = false;
2584 // Figure out whether inlining has exposed a constant address to a lowered
2585 // type test, and remove the test if so and the address is known to pass the
2586 // test. Unfortunately this pass ends up needing to reverse engineer what
2587 // LowerTypeTests did; this is currently inherent to the design of ThinLTO
2588 // importing where LowerTypeTests needs to run at the start.
2589 //
2590 // We look for things like:
2591 //
2592 // sub (i64 ptrtoint (ptr @_Z2fpv to i64), i64 ptrtoint (ptr
2593 // @__typeid__ZTSFvvE_global_addr to i64))
2594 //
2595 // which gets replaced with 0 if _Z2fpv (more specifically _Z2fpv.cfi, the
2596 // function referred to by the jump table) is a member of the type _ZTSFvv, as
2597 // well as things like
2598 //
2599 // icmp eq ptr @_Z2fpv, @__typeid__ZTSFvvE_global_addr
2600 //
2601 // which gets replaced with true if _Z2fpv is a member.
2602 for (auto &GV : M.globals()) {
2603 if (!GV.getName().starts_with("__typeid_") ||
2604 !GV.getName().ends_with("_global_addr"))
2605 continue;
2606 // __typeid_foo_global_addr -> foo
2607 auto *MD = MDString::get(M.getContext(),
2608 GV.getName().substr(9, GV.getName().size() - 21));
2609 auto MaySimplifyPtr = [&](Value *Ptr) {
2610 if (auto *GV = dyn_cast<GlobalValue>(Ptr))
2611 if (auto *CFIGV = M.getNamedValue((GV->getName() + ".cfi").str()))
2612 Ptr = CFIGV;
2613 return isKnownTypeIdMember(MD, M.getDataLayout(), Ptr, 0);
2614 };
2615 auto MaySimplifyInt = [&](Value *Op) {
2616 auto *PtrAsInt = dyn_cast<ConstantExpr>(Op);
2617 if (!PtrAsInt || PtrAsInt->getOpcode() != Instruction::PtrToInt)
2618 return false;
2619 return MaySimplifyPtr(PtrAsInt->getOperand(0));
2620 };
2621 for (User *U : make_early_inc_range(GV.users())) {
2622 if (auto *CI = dyn_cast<ICmpInst>(U)) {
2623 if (CI->getPredicate() == CmpInst::ICMP_EQ &&
2624 MaySimplifyPtr(CI->getOperand(0))) {
2625 // This is an equality comparison (TypeTestResolution::Single case in
2626 // lowerTypeTestCall). In this case we just replace the comparison
2627 // with true.
2628 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2629 CI->eraseFromParent();
2630 Changed = true;
2631 continue;
2632 }
2633 }
2634 auto *CE = dyn_cast<ConstantExpr>(U);
2635 if (!CE || CE->getOpcode() != Instruction::PtrToInt)
2636 continue;
2637 for (Use &U : make_early_inc_range(CE->uses())) {
2638 auto *CE = dyn_cast<ConstantExpr>(U.getUser());
2639 if (U.getOperandNo() == 0 && CE &&
2640 CE->getOpcode() == Instruction::Sub &&
2641 MaySimplifyInt(CE->getOperand(1))) {
2642 // This is a computation of PtrOffset as generated by
2643 // LowerTypeTestsModule::lowerTypeTestCall above. If
2644 // isKnownTypeIdMember passes we just pretend it evaluated to 0. This
2645 // should cause later passes to remove the range and alignment checks.
2646 // The bitset checks won't be removed but those are uncommon.
2647 CE->replaceAllUsesWith(ConstantInt::get(CE->getType(), 0));
2648 Changed = true;
2649 }
2650 auto *CI = dyn_cast<ICmpInst>(U.getUser());
2651 if (U.getOperandNo() == 1 && CI &&
2652 CI->getPredicate() == CmpInst::ICMP_EQ &&
2653 MaySimplifyInt(CI->getOperand(0))) {
2654 // This is an equality comparison. Unlike in the case above it
2655 // remained as an integer compare.
2656 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2657 CI->eraseFromParent();
2658 Changed = true;
2659 }
2660 }
2661 }
2662 }
2663
2664 if (!Changed)
2665 return PreservedAnalyses::all();
2669 PA.preserve<LoopAnalysis>();
2670 return PA;
2671}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
#define DEBUG_TYPE
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static const unsigned kARMJumpTableEntrySize
static const unsigned kLOONGARCH64JumpTableEntrySize
static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL, Value *V, uint64_t COffset)
static const unsigned kX86IBTJumpTableEntrySize
static SmallVector< DILocation * > createJumpTableDebugInfo(Function *F, ArrayRef< GlobalTypeMember * > Functions)
static cl::opt< std::string > ClReadSummary("lowertypetests-read-summary", cl::desc("Read summary from given YAML file before running pass"), cl::Hidden)
static const unsigned kRISCVJumpTableEntrySize
static auto buildBitSets(ArrayRef< Metadata * > TypeIds, const DenseMap< GlobalTypeMember *, uint64_t > &GlobalLayout)
static void dropTypeTests(Module &M, Function &TypeTestFunc, bool ShouldDropAll)
static Value * createMaskedBitTest(IRBuilder<> &B, Value *Bits, Value *BitOffset)
Build a test that bit BitOffset mod sizeof(Bits)*8 is set in Bits.
static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch)
static const unsigned kX86JumpTableEntrySize
static cl::opt< bool > AvoidReuse("lowertypetests-avoid-reuse", cl::desc("Try to avoid reuse of byte array addresses using aliases"), cl::Hidden, cl::init(true))
static cl::opt< PassSummaryAction > ClSummaryAction("lowertypetests-summary-action", cl::desc("What to do with the summary when running this pass"), cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"), clEnumValN(PassSummaryAction::Import, "import", "Import typeid resolutions from summary and globals"), clEnumValN(PassSummaryAction::Export, "export", "Export typeid resolutions to summary and globals")), cl::Hidden)
static const unsigned kARMBTIJumpTableEntrySize
static cl::opt< bool > EnableJumpTableDebugInfo("lowertypetests-jump-table-debug-info", cl::init(true), cl::Hidden, cl::desc("Enable debug info generation for jump tables"))
static cl::opt< std::string > ClWriteSummary("lowertypetests-write-summary", cl::desc("Write summary to given YAML file after running pass"), cl::Hidden)
static BitSetInfo buildBitSet(ArrayRef< uint64_t > Offsets)
Build a bit set for list of offsets.
static bool isDirectCall(Use &U)
static const unsigned kARMv6MJumpTableEntrySize
static const unsigned kHexagonJumpTableEntrySize
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
#define T
ModuleSummaryIndex.h This file contains the declarations the classes that hold the module index and s...
#define P(N)
FunctionAnalysisManager FAM
This file defines the PointerUnion class, which is a discriminated union of pointer types.
This file contains the declarations for profiling metadata utility functions.
static StringRef getName(Value *V)
This file contains some templates that are useful if you are working with the STL at all.
This file contains library features backported from future STL versions.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
This header defines support for implementing classes that have some trailing object (or arrays of obj...
Class for arbitrary precision integers.
Definition APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
size_t count(StringRef S) const
@ ICMP_NE
not equal
Definition InstrTypes.h:698
static CondBrInst * Create(Value *Cond, BasicBlock *IfTrue, BasicBlock *IfFalse, InsertPosition InsertBefore=nullptr)
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
ConstantArray - Constant Array Declarations.
Definition Constants.h:579
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:537
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition Constants.h:865
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getInBoundsGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList)
Create an "inbounds" getelementptr.
Definition Constants.h:1488
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
Definition Constants.h:1478
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getInBoundsPtrAdd(Constant *Ptr, Constant *Offset)
Create a getelementptr inbounds i8, ptr, offset constant expression.
Definition Constants.h:1505
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:632
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI void finalize()
Construct any deferred debug info descriptors.
Definition DIBuilder.cpp:75
LLVM_ABI DISubroutineType * createSubroutineType(DITypeArray ParameterTypes, DINode::DIFlags Flags=DINode::FlagZero, unsigned CC=0)
Create subroutine type.
LLVM_ABI DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine, DINode::DIFlags Flags=DINode::FlagZero, DISubprogram::DISPFlags SPFlags=DISubprogram::SPFlagZero, DITemplateParameterArray TParams=nullptr, DISubprogram *Decl=nullptr, DITypeArray ThrownTypes=nullptr, DINodeArray Annotations=nullptr, StringRef TargetFuncName="", bool UseKeyInstructions=false)
Create a new descriptor for the specified subprogram.
LLVM_ABI DICompileUnit * createCompileUnit(DISourceLanguageName Lang, DIFile *File, StringRef Producer, bool isOptimized, StringRef Flags, unsigned RV, StringRef SplitName=StringRef(), DICompileUnit::DebugEmissionKind Kind=DICompileUnit::DebugEmissionKind::FullDebug, uint64_t DWOId=0, bool SplitDebugInlining=true, bool DebugInfoForProfiling=false, DICompileUnit::DebugNameTableKind NameTableKind=DICompileUnit::DebugNameTableKind::Default, bool RangesBaseAddress=false, StringRef SysRoot={}, StringRef SDK={})
A CompileUnit provides an anchor for all debugging information generated during this instance of comp...
LLVM_ABI DIFile * createFile(StringRef Filename, StringRef Directory, std::optional< DIFile::ChecksumInfo< StringRef > > Checksum=std::nullopt, std::optional< StringRef > Source=std::nullopt)
Create a file descriptor to hold debugging information for a file.
Wrapper structure that holds a language name and its version.
Subprogram description. Uses SubclassData1.
Type array for a subprogram.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Function.cpp:449
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:621
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:223
const Comdat * getComdat() const
LLVM_ABI bool eraseMetadata(unsigned KindID)
Erase all metadata attachments with the given kind.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this GlobalObject.
bool hasSection() const
Check if this global has a custom object file section.
static LLVM_ABI GUID getGUIDAssumingExternalLinkage(StringRef GlobalName)
Return a 64-bit global unique ID constructed from the name of a global symbol.
Definition Globals.cpp:80
bool isDSOLocal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
VisibilityTypes getVisibility() const
static bool isLocalLinkage(LinkageTypes Linkage)
LinkageTypes getLinkage() const
uint64_t GUID
Declare a type to represent a global unique identifier for a global value.
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool isDeclarationForLinker() const
PointerType * getType() const
Global values are always pointers.
VisibilityTypes
An enumeration for the kinds of visibility of global values.
Definition GlobalValue.h:67
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void setInitializer(Constant *InitVal)
setInitializer - Sets the initializer for this global variable, removing any existing initializer if ...
Definition Globals.cpp:542
MaybeAlign getAlign() const
Returns the alignment of the given variable.
void setConstant(bool Val)
LLVM_ABI void setCodeModel(CodeModel::Model CM)
Change the code model for this global.
Definition Globals.cpp:589
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:538
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2858
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
uint64_t getBitMask() const
Return a bitmask with ones set for all of the bits that can be set by an unsigned version of this typ...
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:587
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Metadata * get() const
Definition Metadata.h:931
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:614
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:126
static ErrorOr< std::unique_ptr< MemoryBuffer > > getFile(const Twine &Filename, bool IsText=false, bool RequiresNullTerminator=true, bool IsVolatile=false, std::optional< Align > Alignment=std::nullopt)
Open the specified file as a MemoryBuffer, returning a new MemoryBuffer if successful,...
Root of the metadata hierarchy.
Definition Metadata.h:64
TypeIdSummary & getOrInsertTypeIdSummary(StringRef TypeId)
Return an existing or new TypeIdSummary entry for TypeId.
const TypeIdSummary * getTypeIdSummary(StringRef TypeId) const
This returns either a pointer to the type id summary (if present in the summary map) or null (if not ...
CfiFunctionIndex & cfiFunctionDecls()
CfiFunctionIndex & cfiFunctionDefs()
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
iterator_range< op_iterator > operands()
Definition Metadata.h:1856
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Analysis pass which computes a PostDominatorTree.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:730
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition StringRef.h:685
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:591
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:258
constexpr size_t size() const
Get the string size.
Definition StringRef.h:144
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:270
Type * getElementType(unsigned N) const
Analysis pass providing the TargetTransformInfo.
See the file comment for details on the usage of the TrailingObjects type.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
@ loongarch64
Definition Triple.h:65
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
user_iterator user_begin()
Definition Value.h:402
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
iterator_range< user_iterator > users()
Definition Value.h:426
use_iterator use_begin()
Definition Value.h:364
bool use_empty() const
Definition Value.h:346
LLVM_ABI bool replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:557
iterator_range< use_iterator > uses()
Definition Value.h:380
bool hasName() const
Definition Value.h:261
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition DenseSet.h:180
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
LLVM_ABI bool isJumpTableCanonical(Function *F)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:683
SmallVector< unsigned char, 0 > ByteArray
Definition PropertySet.h:25
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r '.
Definition FileSystem.h:786
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2115
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
detail::zip_longest_range< T, U, Args... > zip_longest(T &&t, U &&u, Args &&... args)
Iterate over two or more iterators at the same time.
Definition STLExtras.h:981
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:328
@ Export
Export information to summary.
Definition IPO.h:57
@ None
Do nothing.
Definition IPO.h:55
@ Import
Import information from summary.
Definition IPO.h:56
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
unsigned M1(unsigned Val)
Definition VE.h:377
LLVM_ABI bool convertUsersOfConstantsToInstructions(ArrayRef< Constant * > Consts, Function *RestrictToFunc=nullptr, bool RemoveDeadConstants=true, bool IncludeSelf=false)
Replace constant expressions users of the given constants with instructions.
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
DWARFExpression::Operation Op
Expected< T > errorOrToExpected(ErrorOr< T > &&EO)
Convert an ErrorOr<T> to an Expected<T>.
Definition Error.h:1261
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1884
constexpr unsigned BitWidth
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Error errorCodeToError(std::error_code EC)
Helper for converting an std::error_code to a Error.
Definition Error.cpp:107
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
CfiFunctionLinkage
The type of CFI jumptable needed for a function.
@ CFL_WeakDeclaration
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
LLVM_ABI GlobalVariable * collectUsedGlobalVariables(const Module &M, SmallVectorImpl< GlobalValue * > &Vec, bool CompilerUsed)
Given "llvm.used" or "llvm.compiler.used" as a global name, collect the initializer elements of that ...
Definition Module.cpp:875
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:89
TypeTestResolution TTRes
Kind
Specifies which kind of type check we should emit for this byte array.
@ Unknown
Unknown (analysis not performed, don't lower)
@ Single
Single element (last example in "Short Inline Bit Vectors")
@ Inline
Inlined bit vector ("Short Inline Bit Vectors")
@ Unsat
Unsatisfiable type (i.e. no global has this type metadata)
@ AllOnes
All-ones bit vector ("Eliminating Bit Vector Checks for All-Ones Bit Vectors")
@ ByteArray
Test a byte array (first example)
unsigned SizeM1BitWidth
Range of size-1 expressed as a bit width.
enum llvm::TypeTestResolution::Kind TheKind
SmallVector< uint64_t, 16 > Offsets
LLVM_ABI bool containsGlobalOffset(uint64_t Offset) const
LLVM_ABI void print(raw_ostream &OS) const
This class is used to build a byte array containing overlapping bit sets.
uint64_t BitAllocs[BitsPerByte]
The number of bytes allocated so far for each of the bits.
std::vector< uint8_t > Bytes
The byte array built so far.
LLVM_ABI void allocate(const std::set< uint64_t > &Bits, uint64_t BitSize, uint64_t &AllocByteOffset, uint8_t &AllocMask)
Allocate BitSize bits in the byte array where Bits contains the bits to set.
This class implements a layout algorithm for globals referenced by bit sets that tries to keep member...
std::vector< std::vector< uint64_t > > Fragments
The computed layout.
LLVM_ABI void addFragment(const std::set< uint64_t > &F)
Add F to the layout while trying to keep its indices contiguous.
std::vector< uint64_t > FragmentMap
Mapping from object index to fragment index.