95#define DEBUG_TYPE "asan"
101 std::numeric_limits<uint64_t>::max();
142 "__asan_unregister_image_globals";
155 "__asan_stack_malloc_always_";
169 "__asan_option_detect_stack_use_after_return";
172 "__asan_shadow_memory_dynamic_address";
198 "asan-kernel",
cl::desc(
"Enable KernelAddressSanitizer instrumentation"),
203 cl::desc(
"Enable recovery mode (continue-after-error)."),
207 "asan-guard-against-version-mismatch",
213 cl::desc(
"instrument read instructions"),
217 "asan-instrument-writes",
cl::desc(
"instrument write instructions"),
226 "asan-instrument-atomics",
236 "asan-always-slow-path",
241 "asan-force-dynamic-shadow",
242 cl::desc(
"Load shadow address into a local variable for each function"),
247 cl::desc(
"Access dynamic shadow through an ifunc global on "
248 "platforms that support this"),
252 "asan-with-ifunc-suppress-remat",
253 cl::desc(
"Suppress rematerialization of dynamic shadow address by passing "
254 "it through inline asm in prologue."),
262 "asan-max-ins-per-bb",
cl::init(10000),
263 cl::desc(
"maximal number of instructions to instrument in any given BB"),
270 "asan-max-inline-poisoning-size",
272 "Inline shadow poisoning for blocks up to the given size in bytes."),
276 "asan-use-after-return",
277 cl::desc(
"Sets the mode of detection for stack-use-after-return."),
280 "Never detect stack use after return."),
283 "Detect stack use after return if "
284 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
286 "Always detect stack use after return.")),
290 cl::desc(
"Create redzones for byval "
291 "arguments (extra copy "
296 cl::desc(
"Check stack-use-after-scope"),
305 cl::desc(
"Handle C++ initializer order"),
309 "asan-detect-invalid-pointer-pair",
314 "asan-detect-invalid-pointer-cmp",
319 "asan-detect-invalid-pointer-sub",
324 "asan-realign-stack",
325 cl::desc(
"Realign stack to the value of this flag (power of two)"),
329 "asan-instrumentation-with-call-threshold",
330 cl::desc(
"If the function being instrumented contains more than "
331 "this number of memory accesses, use callbacks instead of "
332 "inline checks (-1 means never use callbacks)."),
336 "asan-memory-access-callback-prefix",
341 "asan-kernel-mem-intrinsic-prefix",
347 cl::desc(
"instrument dynamic allocas"),
351 "asan-skip-promotable-allocas",
356 "asan-constructor-kind",
357 cl::desc(
"Sets the ASan constructor kind"),
360 "Use global constructors")),
367 cl::desc(
"scale of asan shadow mapping"),
372 cl::desc(
"offset of asan shadow mapping [EXPERIMENTAL]"),
386 "asan-opt-same-temp",
cl::desc(
"Instrument the same temp just once"),
390 cl::desc(
"Don't instrument scalar globals"),
394 "asan-opt-stack",
cl::desc(
"Don't instrument scalar stack variables"),
398 "asan-stack-dynamic-alloca",
403 "asan-force-experiment",
409 cl::desc(
"Use private aliases for global variables"),
414 cl::desc(
"Use odr indicators to improve ODR reporting"),
419 cl::desc(
"Use linker features to support dead "
420 "code stripping of globals"),
427 cl::desc(
"Place ASan constructors in comdat sections"),
431 "asan-destructor-kind",
432 cl::desc(
"Sets the ASan destructor kind. The default is to use the value "
433 "provided to the pass constructor"),
436 "Use global destructors")),
456STATISTIC(NumInstrumentedReads,
"Number of instrumented reads");
457STATISTIC(NumInstrumentedWrites,
"Number of instrumented writes");
459 "Number of optimized accesses to global vars");
461 "Number of optimized accesses to stack vars");
470struct ShadowMapping {
481 bool IsAndroid = TargetTriple.
isAndroid();
484 bool IsMacOS = TargetTriple.
isMacOSX();
487 bool IsPS = TargetTriple.
isPS();
493 bool IsMIPSN32ABI = TargetTriple.
isABIN32();
494 bool IsMIPS32 = TargetTriple.
isMIPS32();
495 bool IsMIPS64 = TargetTriple.
isMIPS64();
496 bool IsArmOrThumb = TargetTriple.
isARM() || TargetTriple.
isThumb();
503 bool IsAMDGPU = TargetTriple.
isAMDGPU();
505 bool IsWasm = TargetTriple.
isWasm();
507 ShadowMapping Mapping;
514 if (LongSize == 32) {
517 else if (IsMIPSN32ABI)
542 else if (IsFreeBSD && IsAArch64)
544 else if (IsFreeBSD && !IsMIPS64) {
549 }
else if (IsNetBSD) {
556 else if (IsLinux && IsX86_64) {
562 }
else if (IsWindows && IsX86_64) {
568 else if (IsMacOS && IsAArch64)
572 else if (IsLoongArch64)
579 else if (IsHaiku && IsX86_64)
599 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
600 !IsRISCV64 && !IsLoongArch64 &&
601 !(Mapping.Offset & (Mapping.Offset - 1)) &&
603 bool IsAndroidWithIfuncSupport =
605 Mapping.InGlobal =
ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
613 int *MappingScale,
bool *OrShadowOffset) {
615 *ShadowBase = Mapping.Offset;
616 *MappingScale = Mapping.Scale;
617 *OrShadowOffset = Mapping.OrShadowOffset;
636 if (!
F.doesNotAccessMemory()) {
637 bool WritesMemory = !
F.onlyReadsMemory();
638 bool ReadsMemory = !
F.onlyWritesMemory();
639 if ((WritesMemory && !ReadsMemory) ||
F.onlyAccessesArgMemory()) {
640 F.removeFnAttr(Attribute::Memory);
646 if (
A.hasAttribute(Attribute::WriteOnly)) {
647 A.removeAttr(Attribute::WriteOnly);
655 F.addFnAttr(Attribute::NoBuiltin);
678 return std::max(32U, 1U << MappingScale);
697class RuntimeCallInserter {
699 bool TrackInsertedCalls =
false;
703 RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
705 auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
706 if (isScopedEHPersonality(Personality))
707 TrackInsertedCalls = true;
711 ~RuntimeCallInserter() {
712 if (InsertedCalls.empty())
714 assert(TrackInsertedCalls &&
"Calls were wrongly tracked");
716 DenseMap<BasicBlock *, ColorVector> BlockColors =
colorEHFunclets(*OwnerFn);
717 for (CallInst *CI : InsertedCalls) {
719 assert(BB &&
"Instruction doesn't belong to a BasicBlock");
721 "Instruction doesn't belong to the expected Function!");
729 if (Colors.
size() != 1) {
730 OwnerFn->getContext().emitError(
731 "Instruction's BasicBlock is not monochromatic");
738 if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
742 OB, CI->getIterator());
743 NewCall->copyMetadata(*CI);
744 CI->replaceAllUsesWith(NewCall);
745 CI->eraseFromParent();
750 CallInst *createRuntimeCall(
IRBuilder<> &IRB, FunctionCallee Callee,
752 const Twine &
Name =
"") {
755 CallInst *Inst = IRB.
CreateCall(Callee, Args, Name,
nullptr);
756 if (TrackInsertedCalls)
757 InsertedCalls.push_back(Inst);
763struct AddressSanitizer {
764 AddressSanitizer(
Module &M,
const StackSafetyGlobalInfo *SSGI,
765 int InstrumentationWithCallsThreshold,
766 uint32_t MaxInlinePoisoningSize,
bool CompileKernel =
false,
767 bool Recover =
false,
bool UseAfterScope =
false,
769 AsanDetectStackUseAfterReturnMode::Runtime)
778 InstrumentationWithCallsThreshold(
781 : InstrumentationWithCallsThreshold),
784 : MaxInlinePoisoningSize) {
785 C = &(M.getContext());
786 DL = &M.getDataLayout();
787 LongSize = M.getDataLayout().getPointerSizeInBits();
788 IntptrTy = Type::getIntNTy(*C, LongSize);
789 PtrTy = PointerType::getUnqual(*C);
790 Int32Ty = Type::getInt32Ty(*C);
791 TargetTriple = M.getTargetTriple();
795 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
803 bool isInterestingAlloca(
const AllocaInst &AI);
805 bool ignoreAccess(Instruction *Inst,
Value *
Ptr);
807 Instruction *
I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
808 const TargetTransformInfo *
TTI);
810 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
811 InterestingMemoryOperand &O,
bool UseCalls,
812 const DataLayout &DL, RuntimeCallInserter &RTCI);
813 void instrumentPointerComparisonOrSubtraction(Instruction *
I,
814 RuntimeCallInserter &RTCI);
816 Value *Addr, MaybeAlign Alignment,
817 uint32_t TypeStoreSize,
bool IsWrite,
818 Value *SizeArgument,
bool UseCalls, uint32_t Exp,
819 RuntimeCallInserter &RTCI);
820 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
821 Instruction *InsertBefore,
Value *Addr,
822 uint32_t TypeStoreSize,
bool IsWrite,
823 Value *SizeArgument);
826 void instrumentUnusualSizeOrAlignment(Instruction *
I,
827 Instruction *InsertBefore,
Value *Addr,
828 TypeSize TypeStoreSize,
bool IsWrite,
829 Value *SizeArgument,
bool UseCalls,
831 RuntimeCallInserter &RTCI);
832 void instrumentMaskedLoadOrStore(AddressSanitizer *
Pass,
const DataLayout &DL,
835 MaybeAlign Alignment,
unsigned Granularity,
836 Type *OpType,
bool IsWrite,
837 Value *SizeArgument,
bool UseCalls,
838 uint32_t Exp, RuntimeCallInserter &RTCI);
840 Value *ShadowValue, uint32_t TypeStoreSize);
842 bool IsWrite,
size_t AccessSizeIndex,
843 Value *SizeArgument, uint32_t Exp,
844 RuntimeCallInserter &RTCI);
845 void instrumentMemIntrinsic(MemIntrinsic *
MI, RuntimeCallInserter &RTCI);
847 bool suppressInstrumentationSiteForDebug(
int &Instrumented);
848 bool instrumentFunction(Function &
F,
const TargetLibraryInfo *TLI,
849 const TargetTransformInfo *
TTI);
850 bool maybeInsertAsanInitAtFunctionEntry(Function &
F);
851 bool maybeInsertDynamicShadowAtFunctionEntry(Function &
F);
852 void markEscapedLocalAllocas(Function &
F);
855 friend struct FunctionStackPoisoner;
857 void initializeCallbacks(
const TargetLibraryInfo *TLI);
859 bool LooksLikeCodeInBug11395(Instruction *
I);
860 bool GlobalIsLinkerInitialized(GlobalVariable *
G);
861 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
Value *Addr,
862 TypeSize TypeStoreSize)
const;
865 struct FunctionStateRAII {
866 AddressSanitizer *Pass;
868 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
869 assert(Pass->ProcessedAllocas.empty() &&
870 "last pass forgot to clear cache");
871 assert(!Pass->LocalDynamicShadow);
874 ~FunctionStateRAII() {
875 Pass->LocalDynamicShadow =
nullptr;
876 Pass->ProcessedAllocas.clear();
882 const DataLayout *DL;
892 ShadowMapping Mapping;
893 FunctionCallee AsanHandleNoReturnFunc;
894 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
902 FunctionCallee AsanErrorCallbackSized[2][2];
903 FunctionCallee AsanMemoryAccessCallbackSized[2][2];
905 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
906 Value *LocalDynamicShadow =
nullptr;
907 const StackSafetyGlobalInfo *SSGI;
908 DenseMap<const AllocaInst *, bool> ProcessedAllocas;
910 FunctionCallee AMDGPUAddressShared;
911 FunctionCallee AMDGPUAddressPrivate;
912 int InstrumentationWithCallsThreshold;
913 uint32_t MaxInlinePoisoningSize;
916class ModuleAddressSanitizer {
918 ModuleAddressSanitizer(
Module &M,
bool InsertVersionCheck,
919 bool CompileKernel =
false,
bool Recover =
false,
920 bool UseGlobalsGC =
true,
bool UseOdrIndicator =
true,
928 : InsertVersionCheck),
930 UseGlobalsGC(UseGlobalsGC &&
ClUseGlobalsGC && !this->CompileKernel),
945 UseCtorComdat(UseGlobalsGC &&
ClWithComdat && !this->CompileKernel),
946 DestructorKind(DestructorKind),
950 C = &(M.getContext());
951 int LongSize = M.getDataLayout().getPointerSizeInBits();
952 IntptrTy = Type::getIntNTy(*C, LongSize);
953 PtrTy = PointerType::getUnqual(*C);
954 TargetTriple = M.getTargetTriple();
959 assert(this->DestructorKind != AsanDtorKind::Invalid);
962 bool instrumentModule();
965 void initializeCallbacks();
967 void instrumentGlobals(
IRBuilder<> &IRB,
bool *CtorComdat);
974 const std::string &UniqueModuleId);
979 InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB,
983 GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
984 StringRef OriginalName);
985 void SetComdatForGlobalMetadata(GlobalVariable *
G, GlobalVariable *
Metadata,
986 StringRef InternalSuffix);
989 const GlobalVariable *getExcludedAliasedGlobal(
const GlobalAlias &GA)
const;
990 bool shouldInstrumentGlobal(GlobalVariable *
G)
const;
991 bool ShouldUseMachOGlobalsSection()
const;
992 StringRef getGlobalMetadataSection()
const;
993 void poisonOneInitializer(Function &GlobalInit);
994 void createInitializerPoisonCalls();
995 uint64_t getMinRedzoneSizeForGlobal()
const {
999 int GetAsanVersion()
const;
1000 GlobalVariable *getOrCreateModuleName();
1004 bool InsertVersionCheck;
1007 bool UsePrivateAlias;
1008 bool UseOdrIndicator;
1015 Triple TargetTriple;
1016 ShadowMapping Mapping;
1017 FunctionCallee AsanPoisonGlobals;
1018 FunctionCallee AsanUnpoisonGlobals;
1019 FunctionCallee AsanRegisterGlobals;
1020 FunctionCallee AsanUnregisterGlobals;
1021 FunctionCallee AsanRegisterImageGlobals;
1022 FunctionCallee AsanUnregisterImageGlobals;
1023 FunctionCallee AsanRegisterElfGlobals;
1024 FunctionCallee AsanUnregisterElfGlobals;
1026 Function *AsanCtorFunction =
nullptr;
1027 Function *AsanDtorFunction =
nullptr;
1028 GlobalVariable *ModuleName =
nullptr;
1040struct FunctionStackPoisoner :
public InstVisitor<FunctionStackPoisoner> {
1042 AddressSanitizer &ASan;
1043 RuntimeCallInserter &RTCI;
1048 ShadowMapping Mapping;
1052 SmallVector<Instruction *, 8> RetVec;
1056 FunctionCallee AsanSetShadowFunc[0x100] = {};
1057 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1058 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1061 struct AllocaPoisonCall {
1062 IntrinsicInst *InsBefore;
1072 AllocaInst *DynamicAllocaLayout =
nullptr;
1073 IntrinsicInst *LocalEscapeCall =
nullptr;
1075 bool HasInlineAsm =
false;
1076 bool HasReturnsTwiceCall =
false;
1079 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1080 RuntimeCallInserter &RTCI)
1081 : F(F), ASan(ASan), RTCI(RTCI),
1083 IntptrTy(ASan.IntptrTy),
1085 Mapping(ASan.Mapping),
1093 copyArgsPassedByValToAllocas();
1098 if (AllocaVec.empty() && DynamicAllocaVec.empty())
return false;
1100 initializeCallbacks(*F.getParent());
1102 processDynamicAllocas();
1103 processStaticAllocas();
1114 void copyArgsPassedByValToAllocas();
1119 void processStaticAllocas();
1120 void processDynamicAllocas();
1122 void createDynamicAllocasInitStorage();
1127 void visitReturnInst(ReturnInst &RI) {
1128 if (CallInst *CI = RI.
getParent()->getTerminatingMustTailCall())
1129 RetVec.push_back(CI);
1131 RetVec.push_back(&RI);
1135 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1138 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1140 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1141 Value *SavedStack) {
1150 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1156 RTCI.createRuntimeCall(
1157 IRB, AsanAllocasUnpoisonFunc,
1158 {IRB.
CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1162 void unpoisonDynamicAllocas() {
1163 for (Instruction *Ret : RetVec)
1164 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1166 for (Instruction *StackRestoreInst : StackRestoreVec)
1167 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1168 StackRestoreInst->getOperand(0));
1181 void handleDynamicAllocaCall(AllocaInst *AI);
1184 void visitAllocaInst(AllocaInst &AI) {
1189 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1193 if (AllocaVec.empty())
1196 StaticAllocasToMoveUp.push_back(&AI);
1202 DynamicAllocaVec.push_back(&AI);
1204 AllocaVec.push_back(&AI);
1209 void visitIntrinsicInst(IntrinsicInst &
II) {
1211 if (
ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&
II);
1212 if (
ID == Intrinsic::localescape) LocalEscapeCall = &
II;
1213 if (!ASan.UseAfterScope)
1215 if (!
II.isLifetimeStartOrEnd())
1220 if (!AI || !ASan.isInterestingAlloca(*AI))
1230 bool DoPoison = (
ID == Intrinsic::lifetime_end);
1231 AllocaPoisonCall APC = {&
II, AI, *
Size, DoPoison};
1233 StaticAllocaPoisonCallVec.push_back(APC);
1235 DynamicAllocaPoisonCallVec.push_back(APC);
1238 void visitCallBase(CallBase &CB) {
1240 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1241 HasReturnsTwiceCall |= CI->canReturnTwice();
1246 void initializeCallbacks(
Module &M);
1251 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1253 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1256 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1257 ArrayRef<uint8_t> ShadowBytes,
size_t Begin,
1262 Value *createAllocaForLayout(
IRBuilder<> &IRB,
const ASanStackFrameLayout &L,
1265 Instruction *ThenTerm,
Value *ValueIfFalse);
1273 OS, MapClassName2PassName);
1275 if (Options.CompileKernel)
1277 if (Options.UseAfterScope)
1278 OS <<
"use-after-scope";
1286 : Options(Options), UseGlobalGC(UseGlobalGC),
1287 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1288 ConstructorKind(ConstructorKind) {}
1297 ModuleAddressSanitizer ModuleSanitizer(
1298 M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
1299 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1311 if (
F.getName().starts_with(
"__asan_"))
1313 if (
F.isPresplitCoroutine())
1315 AddressSanitizer FunctionSanitizer(
1316 M, SSGI, Options.InstrumentationWithCallsThreshold,
1317 Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
1318 Options.UseAfterScope, Options.UseAfterReturn);
1321 Modified |= FunctionSanitizer.instrumentFunction(
F, &TLI, &
TTI);
1323 Modified |= ModuleSanitizer.instrumentModule();
1344 if (
G->getName().starts_with(
"llvm.") ||
1346 G->getName().starts_with(
"__llvm_gcov_ctr") ||
1348 G->getName().starts_with(
"__llvm_rtti_proxy"))
1363 if (AddrSpace == 3 || AddrSpace == 5)
1370 Shadow = IRB.
CreateLShr(Shadow, Mapping.Scale);
1371 if (Mapping.Offset == 0)
return Shadow;
1374 if (LocalDynamicShadow)
1375 ShadowBase = LocalDynamicShadow;
1377 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1378 if (Mapping.OrShadowOffset)
1379 return IRB.
CreateOr(Shadow, ShadowBase);
1381 return IRB.
CreateAdd(Shadow, ShadowBase);
1386 RuntimeCallInserter &RTCI) {
1389 RTCI.createRuntimeCall(
1395 RTCI.createRuntimeCall(
1401 MI->eraseFromParent();
1405bool AddressSanitizer::isInterestingAlloca(
const AllocaInst &AI) {
1406 auto [It,
Inserted] = ProcessedAllocas.try_emplace(&AI);
1409 return It->getSecond();
1411 bool IsInteresting =
1424 !(SSGI && SSGI->
isSafe(AI)));
1426 It->second = IsInteresting;
1427 return IsInteresting;
1441 if (
Ptr->isSwiftError())
1458void AddressSanitizer::getInterestingMemoryOperands(
1462 if (LocalDynamicShadow ==
I)
1468 Interesting.
emplace_back(
I, LI->getPointerOperandIndex(),
false,
1469 LI->getType(), LI->getAlign());
1474 SI->getValueOperand()->getType(),
SI->getAlign());
1478 Interesting.
emplace_back(
I, RMW->getPointerOperandIndex(),
true,
1479 RMW->getValOperand()->getType(), std::nullopt);
1483 Interesting.
emplace_back(
I, XCHG->getPointerOperandIndex(),
true,
1484 XCHG->getCompareOperand()->getType(),
1487 switch (CI->getIntrinsicID()) {
1488 case Intrinsic::masked_load:
1489 case Intrinsic::masked_store:
1490 case Intrinsic::masked_gather:
1491 case Intrinsic::masked_scatter: {
1492 bool IsWrite = CI->getType()->isVoidTy();
1494 unsigned OpOffset = IsWrite ? 1 : 0;
1498 auto BasePtr = CI->getOperand(OpOffset);
1499 if (ignoreAccess(
I, BasePtr))
1501 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1505 Alignment =
Op->getMaybeAlignValue();
1506 Value *
Mask = CI->getOperand(2 + OpOffset);
1507 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, Mask);
1510 case Intrinsic::masked_expandload:
1511 case Intrinsic::masked_compressstore: {
1512 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1513 unsigned OpOffset = IsWrite ? 1 : 0;
1516 auto BasePtr = CI->getOperand(OpOffset);
1517 if (ignoreAccess(
I, BasePtr))
1520 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1523 Value *
Mask = CI->getOperand(1 + OpOffset);
1526 Value *ExtMask =
IB.CreateZExt(Mask, ExtTy);
1527 Value *EVL =
IB.CreateAddReduce(ExtMask);
1528 Value *TrueMask = ConstantInt::get(
Mask->getType(), 1);
1529 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1533 case Intrinsic::vp_load:
1534 case Intrinsic::vp_store:
1535 case Intrinsic::experimental_vp_strided_load:
1536 case Intrinsic::experimental_vp_strided_store: {
1538 unsigned IID = CI->getIntrinsicID();
1539 bool IsWrite = CI->getType()->isVoidTy();
1542 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1543 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1544 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*
DL);
1545 Value *Stride =
nullptr;
1546 if (IID == Intrinsic::experimental_vp_strided_store ||
1547 IID == Intrinsic::experimental_vp_strided_load) {
1548 Stride = VPI->getOperand(PtrOpNo + 1);
1555 Alignment =
Align(1);
1557 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1558 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1562 case Intrinsic::vp_gather:
1563 case Intrinsic::vp_scatter: {
1565 unsigned IID = CI->getIntrinsicID();
1566 bool IsWrite = IID == Intrinsic::vp_scatter;
1569 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1570 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1571 MaybeAlign Alignment = VPI->getPointerAlignment();
1572 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1573 VPI->getMaskParam(),
1574 VPI->getVectorLengthParam());
1580 if (
TTI->getTgtMemIntrinsic(
II, IntrInfo))
1584 for (
unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1586 ignoreAccess(
I, CI->getArgOperand(ArgNo)))
1588 Type *Ty = CI->getParamByValType(ArgNo);
1604 if (!Cmp->isRelational())
1618 if (BO->getOpcode() != Instruction::Sub)
1631 if (!
G->hasInitializer())
1634 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().IsDynInit)
1640void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1644 Value *
Param[2] = {
I->getOperand(0),
I->getOperand(1)};
1645 for (
Value *&i : Param) {
1646 if (i->getType()->isPointerTy())
1649 RTCI.createRuntimeCall(IRB,
F, Param);
1655 TypeSize TypeStoreSize,
bool IsWrite,
1656 Value *SizeArgument,
bool UseCalls,
1657 uint32_t Exp, RuntimeCallInserter &RTCI) {
1662 switch (FixedSize) {
1668 if (!Alignment || *Alignment >= Granularity ||
1669 *Alignment >= FixedSize / 8)
1670 return Pass->instrumentAddress(
I, InsertBefore, Addr, Alignment,
1671 FixedSize, IsWrite,
nullptr, UseCalls,
1675 Pass->instrumentUnusualSizeOrAlignment(
I, InsertBefore, Addr, TypeStoreSize,
1676 IsWrite,
nullptr, UseCalls, Exp, RTCI);
1679void AddressSanitizer::instrumentMaskedLoadOrStore(
1682 MaybeAlign Alignment,
unsigned Granularity,
Type *OpType,
bool IsWrite,
1683 Value *SizeArgument,
bool UseCalls, uint32_t Exp,
1684 RuntimeCallInserter &RTCI) {
1686 TypeSize ElemTypeSize =
DL.getTypeStoreSizeInBits(VTy->getScalarType());
1687 auto Zero = ConstantInt::get(IntptrTy, 0);
1695 Value *IsEVLZero =
IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1697 IB.SetInsertPoint(LoopInsertBefore);
1699 EVL =
IB.CreateZExtOrTrunc(EVL, IntptrTy);
1702 Value *
EC =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1703 EVL =
IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1705 EVL =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1710 Stride =
IB.CreateZExtOrTrunc(Stride, IntptrTy);
1714 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1715 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1716 if (MaskElemC->isZero())
1722 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1723 MaskElem, &*IRB.GetInsertPoint(), false);
1724 IRB.SetInsertPoint(ThenTerm);
1727 Value *InstrumentedAddress;
1730 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1731 "Expected vector of pointer.");
1732 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1733 }
else if (Stride) {
1740 Alignment, Granularity, ElemTypeSize, IsWrite,
1741 SizeArgument, UseCalls, Exp, RTCI);
1748 RuntimeCallInserter &RTCI) {
1749 Value *Addr =
O.getPtr();
1769 isSafeAccess(ObjSizeVis, Addr,
O.TypeStoreSize)) {
1770 NumOptimizedAccessesToGlobalVar++;
1778 isSafeAccess(ObjSizeVis, Addr,
O.TypeStoreSize)) {
1779 NumOptimizedAccessesToStackVar++;
1785 NumInstrumentedWrites++;
1787 NumInstrumentedReads++;
1789 unsigned Granularity = 1 << Mapping.Scale;
1791 instrumentMaskedLoadOrStore(
this,
DL, IntptrTy,
O.MaybeMask,
O.MaybeEVL,
1792 O.MaybeStride,
O.getInsn(), Addr,
O.Alignment,
1793 Granularity,
O.OpType,
O.IsWrite,
nullptr,
1794 UseCalls, Exp, RTCI);
1797 Granularity,
O.TypeStoreSize,
O.IsWrite,
nullptr,
1798 UseCalls, Exp, RTCI);
1803 Value *Addr,
bool IsWrite,
1804 size_t AccessSizeIndex,
1805 Value *SizeArgument,
1807 RuntimeCallInserter &RTCI) {
1813 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1814 {Addr, SizeArgument});
1816 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1817 {Addr, SizeArgument, ExpVal});
1820 Call = RTCI.createRuntimeCall(
1821 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1823 Call = RTCI.createRuntimeCall(
1824 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1833 uint32_t TypeStoreSize) {
1834 size_t Granularity =
static_cast<size_t>(1) << Mapping.Scale;
1836 Value *LastAccessedByte =
1837 IRB.
CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1839 if (TypeStoreSize / 8 > 1)
1841 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1844 IRB.
CreateIntCast(LastAccessedByte, ShadowValue->getType(),
false);
1849Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1851 uint32_t TypeStoreSize,
bool IsWrite,
Value *SizeArgument) {
1858 return InsertBefore;
1863 Value *IsSharedOrPrivate = IRB.
CreateOr(IsShared, IsPrivate);
1865 Value *AddrSpaceZeroLanding =
1868 return InsertBefore;
1884 Trm->getParent()->setName(
"asan.report");
1895void AddressSanitizer::instrumentAddress(
Instruction *OrigIns,
1898 uint32_t TypeStoreSize,
bool IsWrite,
1899 Value *SizeArgument,
bool UseCalls,
1901 RuntimeCallInserter &RTCI) {
1902 if (TargetTriple.isAMDGPU()) {
1903 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1904 TypeStoreSize, IsWrite, SizeArgument);
1913 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1916 ConstantInt::get(
Int32Ty, AccessInfo.Packed)});
1923 RTCI.createRuntimeCall(
1924 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1926 RTCI.createRuntimeCall(
1927 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1928 {AddrLong, ConstantInt::get(IRB.
getInt32Ty(), Exp)});
1935 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1936 const uint64_t ShadowAlign =
1937 std::max<uint64_t>(Alignment.
valueOrOne().
value() >> Mapping.Scale, 1);
1942 size_t Granularity = 1ULL << Mapping.Scale;
1945 bool GenSlowPath = (
ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1947 if (TargetTriple.isAMDGCN()) {
1949 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1952 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1953 }
else if (GenSlowPath) {
1961 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1976 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1985void AddressSanitizer::instrumentUnusualSizeOrAlignment(
1987 TypeSize TypeStoreSize,
bool IsWrite,
Value *SizeArgument,
bool UseCalls,
1988 uint32_t Exp, RuntimeCallInserter &RTCI) {
1996 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
1999 RTCI.createRuntimeCall(
2000 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2014void ModuleAddressSanitizer::poisonOneInitializer(
Function &GlobalInit) {
2020 Value *ModuleNameAddr =
2022 IRB.
CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2025 for (
auto &BB : GlobalInit)
2030void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2050 poisonOneInitializer(*
F);
2056ModuleAddressSanitizer::getExcludedAliasedGlobal(
const GlobalAlias &GA)
const {
2061 assert(CompileKernel &&
"Only expecting to be called when compiling kernel");
2073bool ModuleAddressSanitizer::shouldInstrumentGlobal(
GlobalVariable *
G)
const {
2074 Type *Ty =
G->getValueType();
2077 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().NoAddress)
2079 if (!Ty->
isSized())
return false;
2080 if (!
G->hasInitializer())
return false;
2082 if (
G->getAddressSpace() &&
2089 if (
G->isThreadLocal())
return false;
2091 if (
G->getAlign() && *
G->getAlign() > getMinRedzoneSizeForGlobal())
return false;
2097 if (!TargetTriple.isOSBinFormatCOFF()) {
2098 if (!
G->hasExactDefinition() ||
G->hasComdat())
2102 if (
G->isInterposable())
2106 if (
G->hasAvailableExternallyLinkage())
2113 switch (
C->getSelectionKind()) {
2124 if (
G->hasSection()) {
2134 if (Section ==
"llvm.metadata")
return false;
2141 if (
Section.starts_with(
".preinit_array") ||
2142 Section.starts_with(
".init_array") ||
2143 Section.starts_with(
".fini_array")) {
2149 if (TargetTriple.isOSBinFormatELF()) {
2163 if (TargetTriple.isOSBinFormatCOFF() &&
Section.contains(
'$')) {
2164 LLVM_DEBUG(
dbgs() <<
"Ignoring global in sorted section (contains '$'): "
2169 if (TargetTriple.isOSBinFormatMachO()) {
2171 unsigned TAA = 0, StubSize = 0;
2174 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2179 if (ParsedSegment ==
"__OBJC" ||
2180 (ParsedSegment ==
"__DATA" && ParsedSection.
starts_with(
"__objc_"))) {
2192 if (ParsedSegment ==
"__DATA" && ParsedSection ==
"__cfstring") {
2205 if (CompileKernel) {
2208 if (
G->getName().starts_with(
"__"))
2218bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection()
const {
2219 if (!TargetTriple.isOSBinFormatMachO())
2222 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2224 if (TargetTriple.isiOS() && !TargetTriple.isOSVersionLT(9))
2226 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2228 if (TargetTriple.isDriverKit())
2230 if (TargetTriple.isXROS())
2236StringRef ModuleAddressSanitizer::getGlobalMetadataSection()
const {
2237 switch (TargetTriple.getObjectFormat()) {
2247 "ModuleAddressSanitizer not implemented for object file format");
2254void ModuleAddressSanitizer::initializeCallbacks() {
2260 AsanUnpoisonGlobals =
2264 AsanRegisterGlobals =
M.getOrInsertFunction(
2266 AsanUnregisterGlobals =
M.getOrInsertFunction(
2271 AsanRegisterImageGlobals =
M.getOrInsertFunction(
2273 AsanUnregisterImageGlobals =
M.getOrInsertFunction(
2276 AsanRegisterElfGlobals =
2278 IntptrTy, IntptrTy, IntptrTy);
2279 AsanUnregisterElfGlobals =
2281 IntptrTy, IntptrTy, IntptrTy);
2286void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2291 if (!
G->hasName()) {
2295 G->setName(
genName(
"anon_global"));
2298 if (!InternalSuffix.
empty() &&
G->hasLocalLinkage()) {
2299 std::string
Name = std::string(
G->getName());
2300 Name += InternalSuffix;
2301 C =
M.getOrInsertComdat(Name);
2303 C =
M.getOrInsertComdat(
G->getName());
2309 if (TargetTriple.isOSBinFormatCOFF()) {
2311 if (
G->hasPrivateLinkage())
2324ModuleAddressSanitizer::CreateMetadataGlobal(
Constant *Initializer,
2326 auto Linkage = TargetTriple.isOSBinFormatMachO()
2332 Metadata->setSection(getGlobalMetadataSection());
2339Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2343 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2351void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2355 auto &
DL =
M.getDataLayout();
2358 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2359 Constant *Initializer = MetadataInitializers[i];
2363 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2369 unsigned SizeOfGlobalStruct =
DL.getTypeAllocSize(Initializer->
getType());
2371 "global metadata will not be padded appropriately");
2374 SetComdatForGlobalMetadata(
G,
Metadata,
"");
2379 if (!MetadataGlobals.empty())
2383void ModuleAddressSanitizer::instrumentGlobalsELF(
2386 const std::string &UniqueModuleId) {
2393 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2396 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2399 CreateMetadataGlobal(MetadataInitializers[i],
G->getName());
2401 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2404 if (UseComdatForGlobalsGC)
2405 SetComdatForGlobalMetadata(
G,
Metadata, UniqueModuleId);
2410 if (!MetadataGlobals.empty())
2427 "__start_" + getGlobalMetadataSection());
2431 "__stop_" + getGlobalMetadataSection());
2445 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2452void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2463 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2464 Constant *Initializer = MetadataInitializers[i];
2470 auto LivenessBinder =
2475 Twine(
"__asan_binder_") +
G->getName());
2476 Liveness->
setSection(
"__DATA,__asan_liveness,regular,live_support");
2477 LivenessGlobals[i] = Liveness;
2484 if (!LivenessGlobals.empty())
2506 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2511void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2515 unsigned N = ExtendedGlobals.
size();
2525 if (Mapping.Scale > 3)
2526 AllGlobals->setAlignment(
Align(1ULL << Mapping.Scale));
2531 ConstantInt::get(IntptrTy,
N)});
2537 IrbDtor.CreateCall(AsanUnregisterGlobals,
2539 ConstantInt::get(IntptrTy,
N)});
2548void ModuleAddressSanitizer::instrumentGlobals(
IRBuilder<> &IRB,
2553 if (CompileKernel) {
2554 for (
auto &GA :
M.aliases()) {
2556 AliasedGlobalExclusions.
insert(GV);
2561 for (
auto &
G :
M.globals()) {
2562 if (!AliasedGlobalExclusions.
count(&
G) && shouldInstrumentGlobal(&
G))
2566 size_t n = GlobalsToChange.
size();
2567 auto &
DL =
M.getDataLayout();
2581 IntptrTy, IntptrTy, IntptrTy);
2585 for (
size_t i = 0; i < n; i++) {
2589 if (
G->hasSanitizerMetadata())
2590 MD =
G->getSanitizerMetadata();
2595 std::string NameForGlobal =
G->getName().str();
2600 Type *Ty =
G->getValueType();
2601 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
2614 M, NewTy,
G->isConstant(),
Linkage, NewInitializer,
"",
G,
2615 G->getThreadLocalMode(),
G->getAddressSpace());
2625 if (TargetTriple.isOSBinFormatMachO() && !
G->hasSection() &&
2628 if (Seq && Seq->isCString())
2629 NewGlobal->
setSection(
"__TEXT,__asan_cstring,regular");
2643 G->eraseFromParent();
2644 NewGlobals[i] = NewGlobal;
2649 bool CanUsePrivateAliases =
2650 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2651 TargetTriple.isOSBinFormatWasm();
2652 if (CanUsePrivateAliases && UsePrivateAlias) {
2655 InstrumentedGlobal =
2663 }
else if (UseOdrIndicator) {
2666 auto *ODRIndicatorSym =
2675 ODRIndicatorSym->setAlignment(
Align(1));
2676 ODRIndicator = ODRIndicatorSym;
2682 ConstantInt::get(IntptrTy, SizeInBytes),
2683 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2686 ConstantInt::get(IntptrTy, MD.
IsDynInit),
2692 Initializers[i] = Initializer;
2698 for (
size_t i = 0; i < n; i++) {
2700 if (
G->getName().empty())
continue;
2705 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2712 }
else if (n == 0) {
2715 *CtorComdat = TargetTriple.isOSBinFormatELF();
2717 *CtorComdat =
false;
2718 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2719 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2720 }
else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2721 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2723 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2729 createInitializerPoisonCalls();
2735ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes)
const {
2736 constexpr uint64_t kMaxRZ = 1 << 18;
2737 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2740 if (SizeInBytes <= MinRZ / 2) {
2744 RZ = MinRZ - SizeInBytes;
2747 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2750 if (SizeInBytes % MinRZ)
2751 RZ += MinRZ - (SizeInBytes % MinRZ);
2754 assert((RZ + SizeInBytes) % MinRZ == 0);
2759int ModuleAddressSanitizer::GetAsanVersion()
const {
2760 int LongSize =
M.getDataLayout().getPointerSizeInBits();
2761 bool isAndroid =
M.getTargetTriple().isAndroid();
2765 Version += (LongSize == 32 && isAndroid);
2780bool ModuleAddressSanitizer::instrumentModule() {
2781 initializeCallbacks();
2789 if (CompileKernel) {
2794 std::string AsanVersion = std::to_string(GetAsanVersion());
2795 std::string VersionCheckName =
2797 std::tie(AsanCtorFunction, std::ignore) =
2800 {}, VersionCheckName);
2804 bool CtorComdat =
true;
2807 if (AsanCtorFunction) {
2808 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2809 instrumentGlobals(IRB, &CtorComdat);
2812 instrumentGlobals(IRB, &CtorComdat);
2821 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2822 if (AsanCtorFunction) {
2826 if (AsanDtorFunction) {
2831 if (AsanCtorFunction)
2833 if (AsanDtorFunction)
2844 for (
int Exp = 0;
Exp < 2;
Exp++) {
2845 for (
size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2846 const std::string TypeStr = AccessIsWrite ?
"store" :
"load";
2847 const std::string ExpStr =
Exp ?
"exp_" :
"";
2848 const std::string EndingStr = Recover ?
"_noabort" :
"";
2858 if (
auto AK = TLI->getExtAttrForI32Param(
false)) {
2859 AL2 = AL2.addParamAttribute(*
C, 2, AK);
2860 AL1 = AL1.addParamAttribute(*
C, 1, AK);
2863 AsanErrorCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2867 AsanMemoryAccessCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2872 AccessSizeIndex++) {
2873 const std::string Suffix = TypeStr +
itostr(1ULL << AccessSizeIndex);
2874 AsanErrorCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2875 M.getOrInsertFunction(
2879 AsanMemoryAccessCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2880 M.getOrInsertFunction(
2887 const std::string MemIntrinCallbackPrefix =
2891 AsanMemmove =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memmove",
2892 PtrTy, PtrTy, PtrTy, IntptrTy);
2893 AsanMemcpy =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memcpy", PtrTy,
2894 PtrTy, PtrTy, IntptrTy);
2895 AsanMemset =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memset",
2899 AsanHandleNoReturnFunc =
2902 AsanPtrCmpFunction =
2904 AsanPtrSubFunction =
2906 if (Mapping.InGlobal)
2907 AsanShadowGlobal =
M.getOrInsertGlobal(
"__asan_shadow",
2910 AMDGPUAddressShared =
2912 AMDGPUAddressPrivate =
2916bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(
Function &
F) {
2924 if (
F.getName().contains(
" load]")) {
2934bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(
Function &
F) {
2940 if (Mapping.InGlobal) {
2948 LocalDynamicShadow =
2949 IRB.
CreateCall(Asm, {AsanShadowGlobal},
".asan.shadow");
2951 LocalDynamicShadow =
2955 Value *GlobalDynamicAddress =
F.getParent()->getOrInsertGlobal(
2957 LocalDynamicShadow = IRB.
CreateLoad(IntptrTy, GlobalDynamicAddress);
2962void AddressSanitizer::markEscapedLocalAllocas(
Function &
F) {
2967 assert(ProcessedAllocas.empty() &&
"must process localescape before allocas");
2971 if (!
F.getParent()->getFunction(
"llvm.localescape"))
return;
2977 if (
II &&
II->getIntrinsicID() == Intrinsic::localescape) {
2979 for (
Value *Arg :
II->args()) {
2982 "non-static alloca arg to localescape");
2983 ProcessedAllocas[AI] =
false;
2990bool AddressSanitizer::suppressInstrumentationSiteForDebug(
int &Instrumented) {
2991 bool ShouldInstrument =
2995 return !ShouldInstrument;
2998bool AddressSanitizer::instrumentFunction(
Function &
F,
3001 bool FunctionModified =
false;
3004 if (
F.hasFnAttribute(Attribute::Naked))
3005 return FunctionModified;
3010 if (maybeInsertAsanInitAtFunctionEntry(
F))
3011 FunctionModified =
true;
3014 if (!
F.hasFnAttribute(Attribute::SanitizeAddress))
return FunctionModified;
3016 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3017 return FunctionModified;
3021 initializeCallbacks(TLI);
3023 FunctionStateRAII CleanupObj(
this);
3025 RuntimeCallInserter RTCI(
F);
3027 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(
F);
3031 markEscapedLocalAllocas(
F);
3043 for (
auto &BB :
F) {
3045 TempsToInstrument.
clear();
3046 int NumInsnsPerBB = 0;
3047 for (
auto &Inst : BB) {
3048 if (LooksLikeCodeInBug11395(&Inst))
return false;
3055 if (!InterestingOperands.
empty()) {
3056 for (
auto &Operand : InterestingOperands) {
3062 if (Operand.MaybeMask) {
3066 if (!TempsToInstrument.
insert(
Ptr).second)
3070 OperandsToInstrument.
push_back(Operand);
3077 PointerComparisonsOrSubtracts.
push_back(&Inst);
3085 TempsToInstrument.
clear();
3096 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3097 OperandsToInstrument.
size() + IntrinToInstrument.
size() >
3098 (
unsigned)InstrumentationWithCallsThreshold);
3103 int NumInstrumented = 0;
3104 for (
auto &Operand : OperandsToInstrument) {
3105 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3106 instrumentMop(ObjSizeVis, Operand, UseCalls,
3107 F.getDataLayout(), RTCI);
3108 FunctionModified =
true;
3110 for (
auto *Inst : IntrinToInstrument) {
3111 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3112 instrumentMemIntrinsic(Inst, RTCI);
3113 FunctionModified =
true;
3116 FunctionStackPoisoner FSP(
F, *
this, RTCI);
3117 bool ChangedStack = FSP.runOnFunction();
3121 for (
auto *CI : NoReturnCalls) {
3123 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3126 for (
auto *Inst : PointerComparisonsOrSubtracts) {
3127 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3128 FunctionModified =
true;
3131 if (ChangedStack || !NoReturnCalls.empty())
3132 FunctionModified =
true;
3134 LLVM_DEBUG(
dbgs() <<
"ASAN done instrumenting: " << FunctionModified <<
" "
3137 return FunctionModified;
3143bool AddressSanitizer::LooksLikeCodeInBug11395(
Instruction *
I) {
3144 if (LongSize != 32)
return false;
3153void FunctionStackPoisoner::initializeCallbacks(
Module &M) {
3157 const char *MallocNameTemplate =
3162 std::string Suffix =
itostr(Index);
3163 AsanStackMallocFunc[
Index] =
M.getOrInsertFunction(
3164 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3165 AsanStackFreeFunc[
Index] =
3170 if (ASan.UseAfterScope) {
3171 AsanPoisonStackMemoryFunc =
M.getOrInsertFunction(
3173 AsanUnpoisonStackMemoryFunc =
M.getOrInsertFunction(
3177 for (
size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3178 0xf3, 0xf5, 0xf8}) {
3179 std::ostringstream
Name;
3181 Name << std::setw(2) << std::setfill(
'0') << std::hex << Val;
3182 AsanSetShadowFunc[Val] =
3183 M.getOrInsertFunction(
Name.str(), IRB.
getVoidTy(), IntptrTy, IntptrTy);
3186 AsanAllocaPoisonFunc =
M.getOrInsertFunction(
3188 AsanAllocasUnpoisonFunc =
M.getOrInsertFunction(
3194 size_t Begin,
size_t End,
3196 Value *ShadowBase) {
3200 const size_t LargestStoreSizeInBytes =
3201 std::min<size_t>(
sizeof(uint64_t), ASan.LongSize / 8);
3203 const bool IsLittleEndian =
F.getDataLayout().isLittleEndian();
3209 for (
size_t i = Begin; i < End;) {
3210 if (!ShadowMask[i]) {
3216 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3218 while (StoreSizeInBytes > End - i)
3219 StoreSizeInBytes /= 2;
3222 for (
size_t j = StoreSizeInBytes - 1;
j && !ShadowMask[i +
j]; --
j) {
3223 while (j <= StoreSizeInBytes / 2)
3224 StoreSizeInBytes /= 2;
3228 for (
size_t j = 0;
j < StoreSizeInBytes;
j++) {
3230 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3232 Val = (Val << 8) | ShadowBytes[i + j];
3241 i += StoreSizeInBytes;
3248 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.
size(), IRB, ShadowBase);
3253 size_t Begin,
size_t End,
3256 size_t Done = Begin;
3257 for (
size_t i = Begin, j = Begin + 1; i < End; i =
j++) {
3258 if (!ShadowMask[i]) {
3262 uint8_t Val = ShadowBytes[i];
3263 if (!AsanSetShadowFunc[Val])
3267 for (;
j < End && ShadowMask[
j] && Val == ShadowBytes[
j]; ++
j) {
3270 if (j - i >= ASan.MaxInlinePoisoningSize) {
3271 copyToShadowInline(ShadowMask, ShadowBytes,
Done, i, IRB, ShadowBase);
3272 RTCI.createRuntimeCall(
3273 IRB, AsanSetShadowFunc[Val],
3274 {IRB.
CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3275 ConstantInt::get(IntptrTy, j - i)});
3280 copyToShadowInline(ShadowMask, ShadowBytes,
Done, End, IRB, ShadowBase);
3288 for (
int i = 0;; i++, MaxSize *= 2)
3289 if (LocalStackSize <= MaxSize)
return i;
3293void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3295 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3303 if (Arg.hasByValAttr()) {
3304 Type *Ty = Arg.getParamByValType();
3305 const Align Alignment =
3306 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3310 (Arg.hasName() ? Arg.getName() :
"Arg" +
Twine(Arg.getArgNo())) +
3313 Arg.replaceAllUsesWith(AI);
3315 uint64_t AllocSize =
DL.getTypeAllocSize(Ty);
3316 IRB.
CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3324 Value *ValueIfFalse) {
3327 PHI->addIncoming(ValueIfFalse, CondBlock);
3329 PHI->addIncoming(ValueIfTrue, ThenBlock);
3333Value *FunctionStackPoisoner::createAllocaForLayout(
3342 nullptr,
"MyAlloca");
3346 uint64_t FrameAlignment = std::max(
L.FrameAlignment, uint64_t(
ClRealignStack));
3351void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3354 DynamicAllocaLayout = IRB.
CreateAlloca(IntptrTy,
nullptr);
3359void FunctionStackPoisoner::processDynamicAllocas() {
3366 for (
const auto &APC : DynamicAllocaPoisonCallVec) {
3369 assert(ASan.isInterestingAlloca(*APC.AI));
3370 assert(!APC.AI->isStaticAlloca());
3373 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3380 createDynamicAllocasInitStorage();
3381 for (
auto &AI : DynamicAllocaVec)
3382 handleDynamicAllocaCall(AI);
3383 unpoisonDynamicAllocas();
3395 for (
Instruction *It = Start; It; It = It->getNextNode()) {
3412 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3415 Value *Val = Store->getValueOperand();
3417 bool IsArgInitViaCast =
3422 Val == It->getPrevNode();
3423 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3427 if (IsArgInitViaCast)
3442 if (AI->
hasMetadata(LLVMContext::MD_annotation)) {
3445 for (
auto &Annotation : AllocaAnnotations->
operands()) {
3449 for (
unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3452 auto MetadataString =
3454 if (MetadataString->getString() ==
"alloca_name_altered")
3463void FunctionStackPoisoner::processStaticAllocas() {
3464 if (AllocaVec.
empty()) {
3469 int StackMallocIdx = -1;
3471 if (
auto SP =
F.getSubprogram())
3472 EntryDebugLocation =
3481 auto InsBeforeB = InsBefore->
getParent();
3482 assert(InsBeforeB == &
F.getEntryBlock());
3483 for (
auto *AI : StaticAllocasToMoveUp)
3494 ArgInitInst->moveBefore(InsBefore->
getIterator());
3497 if (LocalEscapeCall)
3505 ASan.getAllocaSizeInBytes(*AI),
3516 uint64_t Granularity = 1ULL << Mapping.Scale;
3517 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3523 for (
auto &
Desc : SVD)
3527 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3530 assert(ASan.isInterestingAlloca(*APC.AI));
3531 assert(APC.AI->isStaticAlloca());
3536 if (
const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3537 if (LifetimeLoc->getFile() == FnLoc->getFile())
3538 if (
unsigned Line = LifetimeLoc->getLine())
3539 Desc.Line = std::min(
Desc.Line ?
Desc.Line : Line, Line);
3545 LLVM_DEBUG(
dbgs() << DescriptionString <<
" --- " <<
L.FrameSize <<
"\n");
3546 uint64_t LocalStackSize =
L.FrameSize;
3547 bool DoStackMalloc =
3557 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3558 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3560 Value *StaticAlloca =
3561 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L,
false);
3564 Value *LocalStackBase;
3565 Value *LocalStackBaseAlloca;
3568 if (DoStackMalloc) {
3569 LocalStackBaseAlloca =
3570 IRB.
CreateAlloca(IntptrTy,
nullptr,
"asan_local_stack_base");
3577 Constant *OptionDetectUseAfterReturn =
F.getParent()->getOrInsertGlobal(
3587 Value *FakeStackValue =
3588 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3589 ConstantInt::get(IntptrTy, LocalStackSize));
3591 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3592 ConstantInt::get(IntptrTy, 0));
3600 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3601 ConstantInt::get(IntptrTy, LocalStackSize));
3603 Value *NoFakeStack =
3608 Value *AllocaValue =
3609 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L,
true) : StaticAlloca;
3612 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3613 IRB.
CreateStore(LocalStackBase, LocalStackBaseAlloca);
3618 FakeStack = ConstantInt::get(IntptrTy, 0);
3620 DoDynamicAlloca ? createAllocaForLayout(IRB, L,
true) : StaticAlloca;
3621 LocalStackBaseAlloca = LocalStackBase;
3627 Value *LocalStackBaseAllocaPtr =
3630 : LocalStackBaseAlloca;
3632 "Variable descriptions relative to ASan stack base will be dropped");
3636 for (
const auto &
Desc : SVD) {
3641 IRB.
CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
Desc.Offset)),
3655 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3665 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3672 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3675 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3677 if (!StaticAllocaPoisonCallVec.empty()) {
3681 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3684 size_t Begin =
Desc.Offset /
L.Granularity;
3685 size_t End = Begin + (APC.Size +
L.Granularity - 1) /
L.Granularity;
3688 copyToShadow(ShadowAfterScope,
3689 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3695 for (
Value *NewAllocaPtr : NewAllocaPtrs) {
3698 if (
I->isLifetimeStartOrEnd())
3699 I->eraseFromParent();
3712 if (DoStackMalloc) {
3713 assert(StackMallocIdx >= 0);
3730 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3732 ShadowAfterReturn.
resize(ClassSize /
L.Granularity,
3734 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3736 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3738 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3739 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3740 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3741 IRBPoison.CreateStore(
3743 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3746 RTCI.createRuntimeCall(
3747 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3748 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3752 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3754 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3759 for (
auto *AI : AllocaVec)
3763void FunctionStackPoisoner::poisonAlloca(
Value *V, uint64_t
Size,
3767 Value *SizeArg = ConstantInt::get(IntptrTy,
Size);
3768 RTCI.createRuntimeCall(
3769 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3770 {AddrArg, SizeArg});
3781void FunctionStackPoisoner::handleDynamicAllocaCall(
AllocaInst *AI) {
3789 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3795 const unsigned ElementSize =
3799 ConstantInt::get(IntptrTy, ElementSize));
3827 ConstantInt::get(IntptrTy, Alignment.
value()));
3830 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3841 if (
I->isLifetimeStartOrEnd())
3842 I->eraseFromParent();
3874 Size - uint64_t(
Offset) >= TypeStoreSize / 8;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This defines the Use class.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Machine Check Debug Module
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
@ SameSize
The data referenced by the COMDAT must be the same size.
@ Any
The linker may choose any COMDAT.
@ NoDeduplicate
No deduplication is performed.
@ ExactMatch
The data referenced by the COMDAT must be the same.
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
bool hasPersonalityFn() const
Check whether this function has a personality function.
const Constant * getAliasee() const
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ CommonLinkage
Tentative definitions.
@ InternalLinkage
Rename collisions when linking (static functions).
@ AvailableExternallyLinkage
Available for inspection, not emission.
@ ExternalWeakLinkage
ExternalWeak linkage description.
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Base class for instruction visitors.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
ArrayRef< MDOperand > operands() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This is the common base class for memset/memcpy/memmove.
A Module instance is used to store all the information related to an LLVM module.
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
Triple - Helper class for working with autoconf configuration names.
bool isAndroidVersionLT(unsigned Major) const
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
bool isDriverKit() const
Is this an Apple DriverKit triple.
bool isAndroid() const
Tests whether the target is Android.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isOSWindows() const
Tests whether the OS is Windows.
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isOSLinux() const
Tests whether the OS is Linux.
bool isMacOSX() const
Is this a Mac OS X triple.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
bool isWatchOS() const
Is this an Apple watchOS triple.
bool isiOS() const
Is this an iOS triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isOSHaiku() const
Tests whether the OS is Haiku.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
FunctionAddr VTableAddr Value
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
FunctionAddr VTableAddr uintptr_t uintptr_t Version
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classifie...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
DWARFExpression::Operation Op
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
const uint8_t AccessSizeIndex
LLVM_ABI ASanAccessInfo(int32_t Packed)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.